
Merge 3.10-rc6 into usb-next

We want the fixes in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Committed by Greg Kroah-Hartman, 2013-06-17 12:02:38 -07:00
commit 1508124d8a
241 changed files with 2627 additions and 1847 deletions

View File

@ -319,7 +319,10 @@ cache<0..n>
Symlink to each of the cache devices comprising this cache set.
cache_available_percent
Percentage of cache device free.
Percentage of cache device which doesn't contain dirty data, and could
potentially be used for writeback. This doesn't mean this space isn't used
for clean cached data; the unused statistic (in priority_stats) is typically
much lower.
clear_stats
Clears the statistics associated with this cache
@ -423,8 +426,11 @@ nbuckets
Total buckets in this cache
priority_stats
Statistics about how recently data in the cache has been accessed. This can
reveal your working set size.
Statistics about how recently data in the cache has been accessed.
This can reveal your working set size. Unused is the percentage of
the cache that doesn't contain any data. Metadata is bcache's
metadata overhead. Average is the average priority of cache buckets.
Next is a list of quantiles with the priority threshold of each.
written
Sum of all data that has been written to the cache; comparison with
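These attributes are plain text files under sysfs, so the statistics described above can be read programmatically. A minimal userspace sketch, not part of this patch — the path layout is an assumption and the cache-set UUID is a placeholder:

#include <stdio.h>

int main(void)
{
	/* Placeholder path: substitute the real cache-set UUID. */
	FILE *f = fopen("/sys/fs/bcache/<cache-set-uuid>/cache_available_percent", "r");
	int pct;

	if (!f) {
		perror("open cache_available_percent");
		return 1;
	}
	if (fscanf(f, "%d", &pct) != 1) {
		fclose(f);
		return 1;
	}
	printf("%d%% of the cache is clean and usable for writeback\n", pct);
	fclose(f);
	return 0;
}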

View File

@ -498,12 +498,8 @@ Your cooperation is appreciated.
Each device type has 5 bits (32 minors).
13 block 8-bit MFM/RLL/IDE controller
0 = /dev/xda First XT disk whole disk
64 = /dev/xdb Second XT disk whole disk
Partitions are handled in the same way as IDE disks
(see major number 3).
13 block Previously used for the XT disk (/dev/xdN)
Deleted in kernel v3.9.
14 char Open Sound System (OSS)
0 = /dev/mixer Mixer control

View File

@ -1,7 +1,7 @@
Atmel AT91RM9200 Real Time Clock
Required properties:
- compatible: should be: "atmel,at91rm9200-rtc"
- compatible: should be: "atmel,at91rm9200-rtc" or "atmel,at91sam9x5-rtc"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: rtc alarm/event interrupt

View File

@ -3351,9 +3351,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
plus one apbt timer for broadcast timer.
x86_mrst_timer=apbt_only | lapic_and_apbt
xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks.
xd_geo= See header of drivers/block/xd.c.
xen_emul_unplug= [HW,X86,XEN]
Unplug Xen emulated devices
Format: [unplug0,][unplug1]

View File

@ -80,8 +80,6 @@ Valid names are:
/dev/sdd: -> 0x0830 (fourth SCSI disk)
/dev/sde: -> 0x0840 (fifth SCSI disk)
/dev/fd : -> 0x0200 (floppy disk)
/dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k)
/dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k)
The name must be followed by a decimal number, that stands for the
partition number. Internally, the value of the number is just

View File

@ -5766,7 +5766,7 @@ M: Matthew Wilcox <willy@linux.intel.com>
L: linux-nvme@lists.infradead.org
T: git git://git.infradead.org/users/willy/linux-nvme.git
S: Supported
F: drivers/block/nvme.c
F: drivers/block/nvme*
F: include/linux/nvme.h
OMAP SUPPORT
@ -7624,7 +7624,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
M: Grant Likely <grant.likely@linaro.org>
L: spi-devel-general@lists.sourceforge.net
L: linux-spi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
S: Maintained
@ -9004,7 +9004,7 @@ S: Maintained
F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
M: Mark Brown <broonie@kernel.org>
M: Liam Girdwood <lrg@slimlogic.co.uk>
L: linux-input@vger.kernel.org
T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
@ -9014,7 +9014,6 @@ F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
L: patches@opensource.wolfsonmicro.com
T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Unicycling Gorilla
# *DOCUMENTATION*

View File

@ -124,7 +124,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all -DZIMAGE
asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \

View File

@ -1,6 +1,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifndef CONFIG_DEBUG_SEMIHOSTING
#include CONFIG_DEBUG_LL_INCLUDE
ENTRY(putc)
@ -10,3 +12,29 @@ ENTRY(putc)
busyuart r3, r1
mov pc, lr
ENDPROC(putc)
#else
ENTRY(putc)
adr r1, 1f
ldmia r1, {r2, r3}
add r2, r2, r1
ldr r1, [r2, r3]
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
THUMB( svc #0xab )
mov pc, lr
.align 2
1: .word _GLOBAL_OFFSET_TABLE_ - .
.word semi_writec_buf(GOT)
ENDPROC(putc)
.bss
.global semi_writec_buf
.type semi_writec_buf, %object
semi_writec_buf:
.space 4
.size semi_writec_buf, 4
#endif
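For readers unfamiliar with the semihosting path added above: SYS_WRITEC (operation 0x03 in r0) takes a pointer to the character in r1 and traps to the attached debugger via svc (immediate 0x123456 in ARM state, 0xab in Thumb, as in the patch). A hedged C-level sketch of the same call, illustrative rather than part of the patch:

/* Write one character to the semihosting console (ARM state). */
static inline void semi_writec(char c)
{
	register unsigned long op asm("r0") = 0x03;	/* SYS_WRITEC */
	register const char *arg asm("r1") = &c;	/* pointer to the char */

	asm volatile("svc 0x123456" : : "r"(op), "r"(arg) : "memory");
}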

View File

@ -11,6 +11,7 @@
#include <asm/mach-types.h>
.section ".start", "ax"
.arch armv4
__SA1100_start:

View File

@ -18,6 +18,7 @@
.section ".start", "ax"
.arch armv4
b __beginning
__ofw_data: .long 0 @ the number of memory blocks

View File

@ -11,6 +11,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
.arch armv7-a
/*
* Debugging stuff
*
@ -805,8 +806,8 @@ call_cache_fn: adr r12, proc_types
.align 2
.type proc_types,#object
proc_types:
.word 0x00000000 @ old ARM ID
.word 0x0000f000
.word 0x41000000 @ old ARM ID
.word 0xff00f000
mov pc, lr
THUMB( nop )
mov pc, lr
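Each proc_types entry pairs an ID value with a mask; the hunk narrows the "old ARM ID" entry from matching nearly any CPU (0x00000000 with mask 0x0000f000) to ARM Ltd parts only (0x41000000 with mask 0xff00f000). A hedged C rendering of the lookup logic — the names are illustrative, the real table lives in assembly:

struct proc_type {
	unsigned int id;	/* expected CP15 ID bits */
	unsigned int mask;	/* which ID bits to compare */
};

static int proc_type_matches(unsigned int cpuid, const struct proc_type *p)
{
	return (cpuid & p->mask) == p->id;
}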

View File

@ -409,8 +409,8 @@
ti,hwmods = "gpmc";
reg = <0x50000000 0x2000>;
interrupts = <100>;
num-cs = <7>;
num-waitpins = <2>;
gpmc,num-cs = <7>;
gpmc,num-waitpins = <2>;
#address-cells = <2>;
#size-cells = <1>;
status = "disabled";

View File

@ -39,8 +39,9 @@
};
soc {
ranges = <0 0 0xd0000000 0x100000
0xf0000000 0 0xf0000000 0x1000000>;
ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>;
internal-regs {
serial@12000 {

View File

@ -27,8 +27,9 @@
};
soc {
ranges = <0 0 0xd0000000 0x100000
0xf0000000 0 0xf0000000 0x8000000>;
ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>;
internal-regs {
serial@12000 {

View File

@ -56,9 +56,23 @@
};
};
&omap4_pmx_wkup {
pinctrl-names = "default";
pinctrl-0 = <
&twl6030_wkup_pins
>;
twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
pinctrl-single,pins = <
0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
>;
};
};
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
&twl6030_pins
&twl6040_pins
&mcpdm_pins
&mcbsp1_pins
@ -66,6 +80,12 @@
&tpd12s015_pins
>;
twl6030_pins: pinmux_twl6030_pins {
pinctrl-single,pins = <
0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
>;
};
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */

View File

@ -142,9 +142,23 @@
};
};
&omap4_pmx_wkup {
pinctrl-names = "default";
pinctrl-0 = <
&twl6030_wkup_pins
>;
twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
pinctrl-single,pins = <
0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
>;
};
};
&omap4_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
&twl6030_pins
&twl6040_pins
&mcpdm_pins
&dmic_pins
@ -179,6 +193,12 @@
>;
};
twl6030_pins: pinmux_twl6030_pins {
pinctrl-single,pins = <
0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
>;
};
twl6040_pins: pinmux_twl6040_pins {
pinctrl-single,pins = <
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */

View File

@ -538,6 +538,7 @@
interrupts = <0 41 0x4>;
ti,hwmods = "timer5";
ti,timer-dsp;
ti,timer-pwm;
};
timer6: timer@4013a000 {
@ -574,6 +575,7 @@
reg = <0x4803e000 0x80>;
interrupts = <0 45 0x4>;
ti,hwmods = "timer9";
ti,timer-pwm;
};
timer10: timer@48086000 {
@ -581,6 +583,7 @@
reg = <0x48086000 0x80>;
interrupts = <0 46 0x4>;
ti,hwmods = "timer10";
ti,timer-pwm;
};
timer11: timer@48088000 {

View File

@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
/* Read TPIDRPRW */
asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
register unsigned long *sp asm ("sp");
/*
* Read TPIDRPRW.
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
return off;
}
#define __my_cpu_offset __my_cpu_offset()
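To make the comment concrete: because the asm is not volatile and instead carries a fake memory input, the compiler may cache its result, but any memory clobber (such as the kernel's barrier()) forces a re-read. A hedged illustration assuming the definitions above, ARM-only:

/* The kernel's barrier() is essentially a memory clobber: */
#define barrier() asm volatile("" ::: "memory")

unsigned long demo(void)
{
	unsigned long a = __my_cpu_offset;	/* first mrc; result may be cached */
	unsigned long b = __my_cpu_offset;	/* compiler is free to reuse 'a' */

	barrier();				/* clobbers memory ... */
	return a + b + __my_cpu_offset;		/* ... so TPIDRPRW is re-read here */
}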

View File

@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
* cpu topology table
*/
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{

View File

@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
kirkwood_pcie_id(&dev, &rev);
if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
(dev == MV88F6282_DEV_ID))
if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
return MPP_F6281_MASK;
if (dev == MV88F6282_DEV_ID)
return MPP_F6282_MASK;
if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
return MPP_F6192_MASK;
if (dev == MV88F6180_DEV_ID)

View File

@ -20,11 +20,12 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clock.h"
#include "clock36xx.h"
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
/**
* omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@ -39,29 +40,28 @@
*/
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
struct clk_hw_omap *parent;
struct clk_divider *parent;
struct clk_hw *parent_hw;
u32 dummy_v, orig_v, clksel_shift;
u32 dummy_v, orig_v;
int ret;
/* Clear PWRDN bit of HSDIVIDER */
ret = omap2_dflt_clk_enable(clk);
parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
parent = to_clk_hw_omap(parent_hw);
parent = to_clk_divider(parent_hw);
/* Restore the dividers */
if (!ret) {
clksel_shift = __ffs(parent->clksel_mask);
orig_v = __raw_readl(parent->clksel_reg);
orig_v = __raw_readl(parent->reg);
dummy_v = orig_v;
/* Write any other value different from the Read value */
dummy_v ^= (1 << clksel_shift);
__raw_writel(dummy_v, parent->clksel_reg);
dummy_v ^= (1 << parent->shift);
__raw_writel(dummy_v, parent->reg);
/* Write the original divider */
__raw_writel(orig_v, parent->clksel_reg);
__raw_writel(orig_v, parent->reg);
}
return ret;

View File

@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
},
};
/* uart2 */
static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
{ .name = "tx", .dma_req = 28, },
{ .name = "rx", .dma_req = 29, },
{ .dma_req = -1 }
};
static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
{ .irq = 73 + OMAP_INTC_START, },
{ .irq = -1 },
@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
.clkdm_name = "l4ls_clkdm",
.flags = HWMOD_SWSUP_SIDLE_ACT,
.mpu_irqs = am33xx_uart2_irqs,
.sdma_reqs = uart1_edma_reqs,
.sdma_reqs = uart2_edma_reqs,
.main_clk = "dpll_per_m2_div4_ck",
.prcm = {
.omap4 = {

View File

@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
/* Clear any pending PRCM interrupts */
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
if (omap3_has_iva())
omap3_iva_idle();
/*
* We need to idle iva2_pwrdm even on am3703 with no iva2.
*/
omap3_iva_idle();
omap3_d2d_idle();
}

View File

@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, pwrc_ids);
if (!np)
panic("unable to find compatible pwrc node in dtb\n");
if (!np) {
pr_err("unable to find compatible sirf pwrc node in dtb\n");
return -ENOENT;
}
/*
* pwrc behind rtciobrg is not located in memory space

View File

@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
struct device_node *np;
np = of_find_matching_node(NULL, rstc_ids);
if (!np)
panic("unable to find compatible rstc node in dtb\n");
if (!np) {
pr_err("unable to find compatible sirf rstc node in dtb\n");
return -ENOENT;
}
sirfsoc_rstc_base = of_iomap(np, 0);
if (!sirfsoc_rstc_base)

View File

@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/serial_core.h>
#include <linux/io.h>
@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
* require a full power-cycle)
*/
if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
if (!of_have_populated_dt() &&
!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
!any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)
/* save all necessary core registers not covered by the drivers */
samsung_pm_save_gpios();
samsung_pm_saved_gpios();
if (!of_have_populated_dt()) {
samsung_pm_save_gpios();
samsung_pm_saved_gpios();
}
s3c_pm_save_uarts();
s3c_pm_save_core();
@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)
s3c_pm_restore_core();
s3c_pm_restore_uarts();
samsung_pm_restore_gpios();
s3c_pm_restored_gpios();
if (!of_have_populated_dt()) {
samsung_pm_restore_gpios();
s3c_pm_restored_gpios();
}
s3c_pm_debug_init();

View File

@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (! ((asid += ASID_INC) & ASID_MASK) ) {
if (cpu_has_vtag_icache)
flush_icache_all();
#ifdef CONFIG_VIRTUALIZATION
#ifdef CONFIG_KVM
kvm_local_flush_tlb_all(); /* start new asid cycle */
#else
local_flush_tlb_all(); /* start new asid cycle */

View File

@ -58,56 +58,53 @@ struct kvm_fpu {
* bits[2..0] - Register 'sel' index.
* bits[7..3] - Register 'rd' index.
* bits[15..8] - Must be zero.
* bits[63..16] - 1 -> CP0 registers.
* bits[31..16] - 1 -> CP0 registers.
* bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h
*
* Other sets registers may be added in the future. Each set would
* have its own identifier in bits[63..16].
*
* The addr field of struct kvm_one_reg must point to an aligned
* 64-bit wide location. For registers that are narrower than
* 64-bits, the value is stored in the low order bits of the location,
* and sign extended to 64-bits.
* have its own identifier in bits[31..16].
*
* The registers defined in struct kvm_regs are also accessible, the
* id values for these are below.
*/
#define KVM_REG_MIPS_R0 0
#define KVM_REG_MIPS_R1 1
#define KVM_REG_MIPS_R2 2
#define KVM_REG_MIPS_R3 3
#define KVM_REG_MIPS_R4 4
#define KVM_REG_MIPS_R5 5
#define KVM_REG_MIPS_R6 6
#define KVM_REG_MIPS_R7 7
#define KVM_REG_MIPS_R8 8
#define KVM_REG_MIPS_R9 9
#define KVM_REG_MIPS_R10 10
#define KVM_REG_MIPS_R11 11
#define KVM_REG_MIPS_R12 12
#define KVM_REG_MIPS_R13 13
#define KVM_REG_MIPS_R14 14
#define KVM_REG_MIPS_R15 15
#define KVM_REG_MIPS_R16 16
#define KVM_REG_MIPS_R17 17
#define KVM_REG_MIPS_R18 18
#define KVM_REG_MIPS_R19 19
#define KVM_REG_MIPS_R20 20
#define KVM_REG_MIPS_R21 21
#define KVM_REG_MIPS_R22 22
#define KVM_REG_MIPS_R23 23
#define KVM_REG_MIPS_R24 24
#define KVM_REG_MIPS_R25 25
#define KVM_REG_MIPS_R26 26
#define KVM_REG_MIPS_R27 27
#define KVM_REG_MIPS_R28 28
#define KVM_REG_MIPS_R29 29
#define KVM_REG_MIPS_R30 30
#define KVM_REG_MIPS_R31 31
#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
#define KVM_REG_MIPS_HI 32
#define KVM_REG_MIPS_LO 33
#define KVM_REG_MIPS_PC 34
#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
/*
* KVM MIPS specific structures and definitions
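With the new encoding, userspace builds a register ID from the architecture, size, and index fields and passes it to KVM_GET_ONE_REG or KVM_SET_ONE_REG. A hedged sketch — it assumes a vcpu fd obtained through the usual KVM setup, and error handling is trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest PC; index 34 matches KVM_REG_MIPS_PC above. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34,
		.addr = (uintptr_t)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}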

View File

@ -25,12 +25,16 @@
#define MCOUNT_OFFSET_INSNS 4
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
#endif
/*
* Check if the address is in kernel space
*

View File

@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void)
}
/*
* The Au1xxx wait is available only if using 32khz counter or
* external timer source, but specifically not CP0 Counter.
* alchemy/common/time.c may override cpu_wait!
* Au1 'wait' is only useful when the 32kHz counter is used as timer,
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
static void au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__(
" .set mips3 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
" nop \n"
" mtc0 %1, $12 \n" /* wr c0status */
" wait \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" .set mips0 \n"
: : "r" (au1k_wait));
local_irq_enable();
: : "r" (au1k_wait), "r" (c0status));
}
static int __initdata nowait;

View File

@ -485,29 +485,35 @@ kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R0,
@ -567,8 +573,6 @@ static u64 kvm_mips_get_one_regs[] = {
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
u64 __user *uaddr = (u64 __user *)(long)reg->addr;
struct mips_coproc *cop0 = vcpu->arch.cop0;
s64 v;
@ -631,18 +635,39 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
default:
return -EINVAL;
}
return put_user(v, uaddr);
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
return put_user(v, uaddr64);
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
u32 v32 = (u32)v;
return put_user(v32, uaddr32);
} else {
return -EINVAL;
}
}
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
u64 __user *uaddr = (u64 __user *)(long)reg->addr;
struct mips_coproc *cop0 = vcpu->arch.cop0;
u64 v;
if (get_user(v, uaddr) != 0)
return -EFAULT;
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
if (get_user(v, uaddr64) != 0)
return -EFAULT;
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
s32 v32;
if (get_user(v32, uaddr32) != 0)
return -EFAULT;
v = (s64)v32;
} else {
return -EINVAL;
}
switch (reg->id) {
case KVM_REG_MIPS_R0:

View File

@ -176,6 +176,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#ifndef __ASSEMBLY__
@ -394,19 +395,20 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
CPU_FTR_HVMODE)
CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@ -415,7 +417,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@ -430,14 +432,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_UNALIGNED_LD_STD)
CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_PURR | CPU_FTR_REAL_LE)
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
CPU_FTR_ICSWX | CPU_FTR_DABRX )
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E

View File

@ -513,7 +513,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,

View File

@ -54,8 +54,16 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
#define BOOKE_INTERRUPT_SPE_FP_DATA 33
#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
/*
* TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
*/
#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define BOOKE_INTERRUPT_DOORBELL 36
@ -67,10 +75,6 @@
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
/* altivec */
#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42
#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43
/* book3s */
#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100

View File

@ -452,8 +452,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = 0,
.oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",
@ -506,8 +506,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = 0,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.platform = "power8",

View File

@ -465,20 +465,6 @@ BEGIN_FTR_SECTION
std r0, THREAD_EBBHR(r3)
mfspr r0, SPRN_EBBRR
std r0, THREAD_EBBRR(r3)
/* PMU registers made user read/(write) by EBB */
mfspr r0, SPRN_SIAR
std r0, THREAD_SIAR(r3)
mfspr r0, SPRN_SDAR
std r0, THREAD_SDAR(r3)
mfspr r0, SPRN_SIER
std r0, THREAD_SIER(r3)
mfspr r0, SPRN_MMCR0
std r0, THREAD_MMCR0(r3)
mfspr r0, SPRN_MMCR2
std r0, THREAD_MMCR2(r3)
mfspr r0, SPRN_MMCRA
std r0, THREAD_MMCRA(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
@ -581,20 +567,6 @@ BEGIN_FTR_SECTION
ld r0, THREAD_EBBRR(r4)
mtspr SPRN_EBBRR, r0
/* PMU registers made user read/(write) by EBB */
ld r0, THREAD_SIAR(r4)
mtspr SPRN_SIAR, r0
ld r0, THREAD_SDAR(r4)
mtspr SPRN_SDAR, r0
ld r0, THREAD_SIER(r4)
mtspr SPRN_SIER, r0
ld r0, THREAD_MMCR0(r4)
mtspr SPRN_MMCR0, r0
ld r0, THREAD_MMCR2(r4)
mtspr SPRN_MMCR2, r0
ld r0, THREAD_MMCRA(r4)
mtspr SPRN_MMCRA, r0
ld r0,THREAD_TAR(r4)
mtspr SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

View File

@ -454,38 +454,14 @@ BEGIN_FTR_SECTION
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync
fmr 0,0
fmr 1,1
fmr 2,2
fmr 3,3
fmr 4,4
fmr 5,5
fmr 6,6
fmr 7,7
fmr 8,8
fmr 9,9
fmr 10,10
fmr 11,11
fmr 12,12
fmr 13,13
fmr 14,14
fmr 15,15
fmr 16,16
fmr 17,17
fmr 18,18
fmr 19,19
fmr 20,20
fmr 21,21
fmr 22,22
fmr 23,23
fmr 24,24
fmr 25,25
fmr 26,26
fmr 27,27
fmr 28,28
fmr 29,29
fmr 30,30
fmr 31,31
#define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n) FMR2(n) ; FMR2(n+2)
#define FMR8(n) FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
FMR32(0)
FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
@ -495,39 +471,25 @@ FTR_SECTION_ELSE
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync
XVCPSGNDP(0,0,0)
XVCPSGNDP(1,1,1)
XVCPSGNDP(2,2,2)
XVCPSGNDP(3,3,3)
XVCPSGNDP(4,4,4)
XVCPSGNDP(5,5,5)
XVCPSGNDP(6,6,6)
XVCPSGNDP(7,7,7)
XVCPSGNDP(8,8,8)
XVCPSGNDP(9,9,9)
XVCPSGNDP(10,10,10)
XVCPSGNDP(11,11,11)
XVCPSGNDP(12,12,12)
XVCPSGNDP(13,13,13)
XVCPSGNDP(14,14,14)
XVCPSGNDP(15,15,15)
XVCPSGNDP(16,16,16)
XVCPSGNDP(17,17,17)
XVCPSGNDP(18,18,18)
XVCPSGNDP(19,19,19)
XVCPSGNDP(20,20,20)
XVCPSGNDP(21,21,21)
XVCPSGNDP(22,22,22)
XVCPSGNDP(23,23,23)
XVCPSGNDP(24,24,24)
XVCPSGNDP(25,25,25)
XVCPSGNDP(26,26,26)
XVCPSGNDP(27,27,27)
XVCPSGNDP(28,28,28)
XVCPSGNDP(29,29,29)
XVCPSGNDP(30,30,30)
XVCPSGNDP(31,31,31)
#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
XVCPSGNDP32(0)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
b denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER8 we need to do that for all 64 VSX registers
*/
XVCPSGNDP32(32)
denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
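The FMR32/XVCPSGNDP32 macros above use a doubling scheme, so five short macro levels replace 32 hand-written lines. The same pattern in portable C preprocessor form, purely illustrative:

#include <stdio.h>

#define OP(n)   printf("fmr %d,%d\n", (n), (n));
#define OP2(n)  OP(n)   OP((n)+1)
#define OP4(n)  OP2(n)  OP2((n)+2)
#define OP8(n)  OP4(n)  OP4((n)+4)
#define OP16(n) OP8(n)  OP8((n)+8)
#define OP32(n) OP16(n) OP16((n)+16)

int main(void)
{
	OP32(0)		/* expands to 32 statements covering 0..31 */
	return 0;
}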
@ -721,7 +683,7 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
#ifdef CONFIG_PPC_DOORBELL
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)

View File

@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void)
* in case we also had a rollover while hard disabled
*/
local_paca->irq_happened &= ~PACA_IRQ_DEC;
if (decrementer_check_overflow())
if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
/* Finally check if an external interrupt happened */

View File

@ -827,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
struct pci_bus_region reg;
if (!res->flags)
continue;
@ -835,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
pcibios_resource_to_bus(dev, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
(res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
(reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "

View File

@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
mtspr(SPRN_DABR, dabr);
mtspr(SPRN_DABRX, dabrx);
if (cpu_has_feature(CPU_FTR_DABRX))
mtspr(SPRN_DABRX, dabrx);
return 0;
}
#else
@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void)
}
/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
unsigned long ctrl;

View File

@ -1165,6 +1165,16 @@ bail:
exception_exit(prev_state);
}
/*
* This occurs when running in hypervisor mode on POWER6 or later
* and an illegal instruction is encountered.
*/
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
regs->msr |= REASON_ILLEGAL;
program_check_exception(regs);
}
void alignment_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();

View File

@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe;
unsigned int gtlb_index;
int idx;
gtlb_index = kvmppc_get_gpr(vcpu, ra);
if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
return EMULATE_FAIL;
}
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (tlbe_is_host_safe(vcpu, tlbe)) {
gva_t eaddr;
gpa_t gpaddr;
@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
tlbe->word2);

View File

@ -832,6 +832,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int r = RESUME_HOST;
int s;
int idx;
#ifdef CONFIG_PPC64
WARN_ON(local_paca->irq_happened != 0);
#endif
/*
* We enter with interrupts disabled in hardware, but
* we need to call hard_irq_disable anyway to ensure that
* the software state is kept in sync.
*/
hard_irq_disable();
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@ -1053,6 +1065,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
idx = srcu_read_lock(&vcpu->kvm->srcu);
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@ -1075,6 +1089,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
@ -1098,6 +1113,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
idx = srcu_read_lock(&vcpu->kvm->srcu);
gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gfn = gpaddr >> PAGE_SHIFT;
@ -1114,6 +1131,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}

View File

@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel;
int recal = 0;
int idx;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_set_tlb1map_range(vcpu, gtlbe);
}
idx = srcu_read_lock(&vcpu->kvm->srcu);
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
u64 eaddr = get_tlb_eaddr(gtlbe);
@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
return EMULATE_DONE;
}

View File

@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
r = 0;
else
r = -ENOTSUPP;

View File

@ -1758,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
}
}
}
if ((!found) && printk_ratelimit())
if (!found && !nmi && printk_ratelimit())
printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*

View File

@ -83,7 +83,11 @@ static int pseries_eeh_init(void)
ibm_configure_pe = rtas_token("ibm,configure-pe");
ibm_configure_bridge = rtas_token("ibm,configure-bridge");
/* necessary sanity check */
/*
* Necessary sanity check. We needn't check "get-config-addr-info"
* and its variant since the old firmware probably supports the address
* of domain/bus/slot/function for EEH RTAS operations.
*/
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
__func__);
@ -102,12 +106,6 @@ static int pseries_eeh_init(void)
pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
__func__);
return -EINVAL;
} else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
"<ibm,get-config-addr-info> invalid\n",
__func__);
return -EINVAL;
} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
pr_warning("%s: RTAS service <ibm,configure-pe> and "

View File

@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep)
" csg %0,%1,%2\n"
" jl 0b\n"
: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
: "Q" (ptep[PTRS_PER_PTE]) : "cc");
: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
return __pgste(new);
}
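The csg-based loop in pgste_get_lock above is a compare-and-swap spin: reload the quadword, clear the lock bit in the expected value, set it in the new value, and retry until the swap succeeds. A hedged, architecture-neutral sketch using GCC atomics — the bit position is illustrative, not the real PGSTE layout:

#include <stdint.h>

#define PCL_BIT (1UL << 55)	/* illustrative lock-bit position */

static uint64_t pgste_lock_sketch(uint64_t *pgste)
{
	uint64_t old, new;

	do {
		old = __atomic_load_n(pgste, __ATOMIC_RELAXED) & ~PCL_BIT;
		new = old | PCL_BIT;
	} while (!__atomic_compare_exchange_n(pgste, &old, new, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return old;
}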
@ -635,11 +635,19 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
" nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
" stg %1,%0\n"
: "=Q" (ptep[PTRS_PER_PTE])
: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
: "cc", "memory");
preempt_enable();
#endif
}
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
unsigned long address;
unsigned long okey, nkey;
unsigned long nkey;
if (pte_val(entry) & _PAGE_INVALID)
return;
VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
address = pte_val(entry) & PAGE_MASK;
okey = nkey = page_get_storage_key(address);
nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set page access key and fetch protection bit from pgste */
nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
if (okey != nkey)
page_set_storage_key(address, nkey, 0);
/*
* Set page access key and fetch protection bit from pgste.
* The guest C/R information is still in the PGSTE, set real
* key C/R to 0.
*/
nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
page_set_storage_key(address, nkey, 0);
#endif
}
@ -1099,8 +1109,10 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
if (mm_has_pgste(mm))
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set(ptep, pgste);
}
return pte;
}

View File

@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
static void show_trace(struct task_struct *task, unsigned long *stack)
{
const unsigned long frame_size =
STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
register unsigned long __r15 asm ("15");
unsigned long sp;
@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
S390_lowcore.panic_stack);
sp = __show_trace(sp,
S390_lowcore.panic_stack + frame_size - 4096,
S390_lowcore.panic_stack + frame_size);
#endif
sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
sp = __show_trace(sp,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);

View File

@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void)
spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
void synchronize_irq(unsigned int irq)
{
/*
* Not needed, the handler is protected by a lock and IRQs that occur
* after the handler is deleted are just NOPs.
*/
}
EXPORT_SYMBOL_GPL(synchronize_irq);
#ifndef CONFIG_PCI
/* Only PCI devices have dynamically-defined IRQ handlers */
int request_irq(unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
return -EINVAL;
}
EXPORT_SYMBOL_GPL(request_irq);
void free_irq(unsigned int irq, void *dev_id)
{
WARN_ON(1);
}
EXPORT_SYMBOL_GPL(free_irq);
void enable_irq(unsigned int irq)
{
WARN_ON(1);
}
EXPORT_SYMBOL_GPL(enable_irq);
void disable_irq(unsigned int irq)
{
WARN_ON(1);
}
EXPORT_SYMBOL_GPL(disable_irq);
#endif /* !CONFIG_PCI */
void disable_irq_nosync(unsigned int irq)
{
disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);
unsigned long probe_irq_on(void)
{
return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);
int probe_irq_off(unsigned long val)
{
return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);
unsigned int probe_irq_mask(unsigned long val)
{
return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

View File

@ -225,7 +225,7 @@ _sclp_print:
ahi %r2,1
ltr %r0,%r0 # end of string?
jz .LfinalizemtoS4
chi %r0,0x15 # end of line (NL)?
chi %r0,0x0a # end of line (NL)?
jz .LfinalizemtoS4
stc %r0,0(%r6,%r7) # copy to mto
la %r11,0(%r6,%r7)

View File

@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
return rc;
}
void synchronize_irq(unsigned int irq)
{
/*
* Not needed, the handler is protected by a lock and IRQs that occur
* after the handler is deleted are just NOPs.
*/
}
EXPORT_SYMBOL_GPL(synchronize_irq);
void enable_irq(unsigned int irq)
{
struct msi_desc *msi = irq_get_msi_desc(irq);
@ -327,30 +318,6 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_irq);
void disable_irq_nosync(unsigned int irq)
{
disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);
unsigned long probe_irq_on(void)
{
return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);
int probe_irq_off(unsigned long val)
{
return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);
unsigned int probe_irq_mask(unsigned long val)
{
return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

View File

@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
unsigned long flags;
void *new_val;
int err;
@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
raw_spin_lock(&devtree_lock);
raw_spin_lock_irqsave(&devtree_lock, flags);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
prevp = &(*prevp)->next;
}
raw_spin_unlock(&devtree_lock);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
mutex_unlock(&of_set_property_mutex);
/* XXX Update procfs if necessary... */

View File

@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
static efi_status_t setup_efi_vars(struct boot_params *params)
{
struct setup_data *data;
struct efi_var_bootdata *efidata;
u64 store_size, remaining_size, var_size;
efi_status_t status;
if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
&remaining_size, &var_size);
if (status != EFI_SUCCESS)
return status;
status = efi_call_phys3(sys_table->boottime->allocate_pool,
EFI_LOADER_DATA, sizeof(*efidata), &efidata);
if (status != EFI_SUCCESS)
return status;
efidata->data.type = SETUP_EFI_VARS;
efidata->data.len = sizeof(struct efi_var_bootdata) -
sizeof(struct setup_data);
efidata->data.next = 0;
efidata->store_size = store_size;
efidata->remaining_size = remaining_size;
efidata->max_var_size = var_size;
if (data)
data->next = (unsigned long)efidata;
else
params->hdr.setup_data = (unsigned long)efidata;
}
static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
setup_efi_vars(boot_params);
setup_efi_pci(boot_params);
status = efi_call_phys3(sys_table->boottime->allocate_pool,

View File

@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
struct efi_var_bootdata {
struct setup_data data;
u64 store_size;
u64 remaining_size;
u64 max_var_size;
};
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)

View File

@ -6,7 +6,6 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI_VARS 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF

View File

@ -160,7 +160,7 @@ identity_mapped:
xorq %rbp, %rbp
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r9
xorq %r10, %r10
xorq %r11, %r11
xorq %r12, %r12
xorq %r13, %r13

View File

@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
end_pfn = limit_pfn;
nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
if (!after_bootmem)
adjust_range_page_size_mask(mr, nr_range);
/* try to merge same page size and continuous */
for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
unsigned long old_start;
@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
nr_range--;
}
if (!after_bootmem)
adjust_range_page_size_mask(mr, nr_range);
for (i = 0; i < nr_range; i++)
printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
mr[i].start, mr[i].end - 1,

View File

@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/efi.h>
@ -54,12 +53,12 @@
#define EFI_DEBUG 1
/*
* There's some additional metadata associated with each
* variable. Intel's reference implementation is 60 bytes - bump that
* to account for potential alignment constraints
*/
#define VAR_METADATA_SIZE 64
#define EFI_MIN_RESERVE 5120
#define EFI_DUMMY_GUID \
EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
@ -79,13 +78,6 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
static u64 efi_var_store_size;
static u64 efi_var_remaining_size;
static u64 efi_var_max_var_size;
static u64 boot_used_size;
static u64 boot_var_size;
static u64 active_size;
unsigned long x86_efi_facility;
/*
@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
efi_status_t status;
static bool finished = false;
static u64 var_size;
status = efi_call_virt3(get_next_variable,
name_size, name, vendor);
if (status == EFI_NOT_FOUND) {
finished = true;
if (var_size < boot_used_size) {
boot_var_size = boot_used_size - var_size;
active_size += boot_var_size;
} else {
printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
}
}
if (boot_used_size && !finished) {
unsigned long size = 0;
u32 attr;
efi_status_t s;
void *tmp;
s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
if (s != EFI_BUFFER_TOO_SMALL || !size)
return status;
tmp = kmalloc(size, GFP_ATOMIC);
if (!tmp)
return status;
s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
var_size += size;
var_size += ucs2_strsize(name, 1024);
active_size += size;
active_size += VAR_METADATA_SIZE;
active_size += ucs2_strsize(name, 1024);
}
kfree(tmp);
}
return status;
return efi_call_virt3(get_next_variable,
name_size, name, vendor);
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
efi_status_t status;
u32 orig_attr = 0;
unsigned long orig_size = 0;
status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
NULL);
if (status != EFI_BUFFER_TOO_SMALL)
orig_size = 0;
status = efi_call_virt5(set_variable,
name, vendor, attr,
data_size, data);
if (status == EFI_SUCCESS) {
if (orig_size) {
active_size -= orig_size;
active_size -= ucs2_strsize(name, 1024);
active_size -= VAR_METADATA_SIZE;
}
if (data_size) {
active_size += data_size;
active_size += ucs2_strsize(name, 1024);
active_size += VAR_METADATA_SIZE;
}
}
return status;
return efi_call_virt5(set_variable,
name, vendor, attr,
data_size, data);
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
@ -786,9 +708,6 @@ void __init efi_init(void)
char vendor[100] = "unknown";
int i = 0;
void *tmp;
struct setup_data *data;
struct efi_var_bootdata *efi_var_data;
u64 pa_data;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@ -806,22 +725,6 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
data = early_ioremap(pa_data, sizeof(*efi_var_data));
if (data->type == SETUP_EFI_VARS) {
efi_var_data = (struct efi_var_bootdata *)data;
efi_var_store_size = efi_var_data->store_size;
efi_var_remaining_size = efi_var_data->remaining_size;
efi_var_max_var_size = efi_var_data->max_var_size;
}
pa_data = data->next;
early_iounmap(data, sizeof(*efi_var_data));
}
boot_used_size = efi_var_store_size - efi_var_remaining_size;
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
/*
@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void)
runtime_code_page_mkexec();
kfree(new_memmap);
/* clean DUMMY object */
efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
0, NULL);
}
/*
@ -1136,33 +1046,65 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
efi_status_t status;
u64 storage_size, remaining_size, max_size;
if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
return 0;
status = efi.query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
if (status != EFI_SUCCESS)
return status;
if (!max_size && remaining_size > size)
printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
" is returning MaxVariableSize=0\n");
/*
* Some firmware implementations refuse to boot if there's insufficient
* space in the variable store. We account for that by refusing the
* write if permitting it would reduce the available space to under
* 50%. However, some firmware won't reclaim variable space until
* after the used (not merely the actively used) space drops below
* a threshold. We can approximate that case with the value calculated
* above. If both the firmware and our calculations indicate that the
* available space would drop below 50%, refuse the write.
* 5KB. This figure was provided by Samsung, so should be safe.
*/
if ((remaining_size - size < EFI_MIN_RESERVE) &&
!efi_no_storage_paranoia) {
if (!storage_size || size > remaining_size ||
(max_size && size > max_size))
return EFI_OUT_OF_RESOURCES;
/*
* Triggering garbage collection may require that the firmware
* generate a real EFI_OUT_OF_RESOURCES error. We can force
* that by attempting to use more space than is available.
*/
unsigned long dummy_size = remaining_size + 1024;
void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
if (!efi_no_storage_paranoia &&
((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
(remaining_size - size < storage_size / 2)))
return EFI_OUT_OF_RESOURCES;
status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
dummy_size, dummy);
if (status == EFI_SUCCESS) {
/*
* This should have failed, so if it didn't make sure
* that we delete it...
*/
efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
0, dummy);
}
/*
* The runtime code may now have triggered a garbage collection
* run, so check the variable info again
*/
status = efi.query_variable_info(attributes, &storage_size,
&remaining_size, &max_size);
if (status != EFI_SUCCESS)
return status;
/*
* There still isn't enough room, so return an error
*/
if (remaining_size - size < EFI_MIN_RESERVE)
return EFI_OUT_OF_RESOURCES;
}
return EFI_SUCCESS;
}

View File

@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
#if ELF_BITS == 64
"__vvar_page|"
#endif
"__crc_)",
/*
@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__per_cpu_load|"
"init_per_cpu__.*|"
"__end_rodata_hpage_align|"
"__vvar_page|"
#endif
"_end)$"
};

View File

@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
/*
* commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
* clears certain data that the cpu_idle loop (which called us
* and that we return from) expects. The only way to get that
* data back is to call:
*/
tick_nohz_idle_enter();
}
#else /* !CONFIG_HOTPLUG_CPU */

View File

@ -3164,7 +3164,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
q->rpm_status = RPM_ACTIVE;
__blk_run_queue(q);
pm_runtime_mark_last_busy(q->dev);
pm_runtime_autosuspend(q->dev);
pm_request_autosuspend(q->dev);
} else {
q->rpm_status = RPM_SUSPENDED;
}

View File

@ -823,6 +823,7 @@ config CRYPTO_BLOWFISH_X86_64
config CRYPTO_BLOWFISH_AVX2_X86_64
tristate "Blowfish cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
depends on BROKEN
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
@ -1299,6 +1300,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
config CRYPTO_TWOFISH_AVX2_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
depends on BROKEN
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86

View File

@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
return -ENOSYS;
result = driver->ops.add(device);
if (result) {
device->driver = NULL;
device->driver_data = NULL;
if (result)
return result;
}
device->driver = driver;

View File

@ -1722,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
int error;
acpi_status status;
if (device->handler)
return -EINVAL;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1,
acpi_video_bus_match, NULL,

View File

@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
int registers = 0;
int this_registers, average;
map->lock(map);
map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
nodes, registers, average, mem_size);
map->unlock(map);
map->unlock(map->lock_arg);
return 0;
}
@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
if (rbnode->base_reg < min)
continue;
if (rbnode->base_reg > max)
break;
if (rbnode->base_reg + rbnode->blklen < min)

View File

@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
map->lock(map);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
@ -306,7 +306,7 @@ out:
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
map->unlock(map);
map->unlock(map->lock_arg);
return ret;
}
@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
map->lock(map);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
@ -352,7 +352,7 @@ out:
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
map->unlock(map);
map->unlock(map->lock_arg);
return ret;
}
@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
map->lock(map);
map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
map->unlock(map);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
map->lock(map);
map->lock(map->lock_arg);
map->cache_dirty = true;
map->unlock(map);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
map->lock(map);
map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
map->unlock(map);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
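
The repeated change above is subtle: regmap's lock/unlock are callbacks, and when a driver supplies its own locking the callback expects the opaque lock_arg it was registered with, not the map pointer. A user-space sketch of the pattern follows; struct map, mark_dirty and the callback names are invented for the example and are not the regmap API.

#include <pthread.h>
#include <stdio.h>

struct map {
	void (*lock)(void *arg);
	void (*unlock)(void *arg);
	void *lock_arg;		/* what the callbacks were registered with */
	int cache_dirty;
};

static void mutex_lock_cb(void *arg)   { pthread_mutex_lock(arg); }
static void mutex_unlock_cb(void *arg) { pthread_mutex_unlock(arg); }

static void mark_dirty(struct map *m)
{
	/* Passing m itself here is the bug being fixed: a caller-supplied
	 * callback would misread the map as its private lock context. */
	m->lock(m->lock_arg);
	m->cache_dirty = 1;
	m->unlock(m->lock_arg);
}

int main(void)
{
	pthread_mutex_t user_mutex = PTHREAD_MUTEX_INITIALIZER;
	struct map m = {
		.lock = mutex_lock_cb,
		.unlock = mutex_unlock_cb,
		.lock_arg = &user_mutex,	/* caller-owned lock context */
	};

	mark_dirty(&m);
	printf("dirty=%d\n", m.cache_dirty);
	return 0;
}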

View File

@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
char *start = buf;
unsigned long reg, value;
struct regmap *map = file->private_data;
int ret;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
regmap_write(map, reg, value);
ret = regmap_write(map, reg, value);
if (ret < 0)
return ret;
return buf_size;
}
#else

View File

@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static void cciss_release(struct gendisk *disk, fmode_t mode);
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
.open = cciss_unlocked_open,
.release = cciss_release,
.ioctl = do_ioctl,
.ioctl = cciss_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&cciss_mutex);
}
static int do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
int ret;
mutex_lock(&cciss_mutex);
ret = cciss_ioctl(bdev, mode, cmd, arg);
mutex_unlock(&cciss_mutex);
return ret;
}
#ifdef CONFIG_COMPAT
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
return do_ioctl(bdev, mode, cmd, arg);
return cciss_ioctl(bdev, mode, cmd, arg);
case CCISS_PASSTHRU32:
return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
cciss_coalint_struct intinfo;
unsigned long flags;
if (!argp)
return -EINVAL;
spin_lock_irqsave(&h->lock, flags);
intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user
(argp, &intinfo, sizeof(cciss_coalint_struct)))
return -EFAULT;
@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
NodeName_type NodeName;
unsigned long flags;
int i;
if (!argp)
return -EINVAL;
spin_lock_irqsave(&h->lock, flags);
for (i = 0; i < 16; i++)
NodeName[i] = readb(&h->cfgtable->ServerName[i]);
spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
return -EFAULT;
return 0;
@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
Heartbeat_type heartbeat;
unsigned long flags;
if (!argp)
return -EINVAL;
spin_lock_irqsave(&h->lock, flags);
heartbeat = readl(&h->cfgtable->HeartBeat);
spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
return -EFAULT;
return 0;
@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
BusTypes_type BusTypes;
unsigned long flags;
if (!argp)
return -EINVAL;
spin_lock_irqsave(&h->lock, flags);
BusTypes = readl(&h->cfgtable->BusTypes);
spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
return -EFAULT;
return 0;

View File

@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
debugfs_remove_recursive(dd->dfs_node);
if (dd->dfs_node)
debugfs_remove_recursive(dd->dfs_node);
}
@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
struct bio_vec *bvec;
int nents = 0;
int i, nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
bio_for_each_segment(bvec, bio, nents) {
bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg[nents],
bvec->bv_page,
bvec->bv_len,
bvec->bv_offset);
nents++;
}
/* Issue the read/write. */
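
The mtip32xx change above separates two variables the old code conflated: the index that bio_for_each_segment advances itself, and the count of scatter-gather entries actually filled in. A standalone C sketch of why an iteration macro's index must not double as the caller's element counter; for_each_item is a made-up macro, not the block-layer one.

#include <stdio.h>

/* Toy iteration macro that, like bio_for_each_segment, owns and
 * advances its index argument. */
#define for_each_item(val, arr, len, idx) \
	for ((idx) = 0; (idx) < (len) && ((val) = (arr)[(idx)], 1); (idx)++)

int main(void)
{
	int data[] = { 3, 0, 7, 0, 9 };	/* 0 marks entries we skip */
	int kept[5], val, i, nents = 0;

	for_each_item(val, data, 5, i) {
		if (val == 0)
			continue;	/* skipped: i advances, nents must not */
		kept[nents++] = val;	/* nents counts only filled entries */
	}
	printf("kept %d of 5 entries\n", nents);	/* kept 3 of 5 */
	return 0;
}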

View File

@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct nvme_command *cmnd;
struct nvme_iod *iod;
enum dma_data_direction dma_dir;
int cmdid, length, result = -ENOMEM;
int cmdid, length, result;
u16 control;
u32 dsmgmt;
int psegs = bio_phys_segments(ns->queue, bio);
@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return result;
}
result = -ENOMEM;
iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
if (timeout && !time_after(now, info[cmdid].timeout))
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
if (addr & 3)
return ERR_PTR(-EINVAL);
if (!length)
if (!length || length > INT_MAX - PAGE_SIZE)
return ERR_PTR(-EINVAL);
offset = offset_in_page(addr);
@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
sg_init_table(sg, count);
for (i = 0; i < count; i++) {
sg_set_page(&sg[i], pages[i],
min_t(int, length, PAGE_SIZE - offset), offset);
min_t(unsigned, length, PAGE_SIZE - offset),
offset);
length -= (PAGE_SIZE - offset);
offset = 0;
}
@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
nvme_free_iod(dev, iod);
}
if (!status && copy_to_user(&ucmd->result, &cmd.result,
if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
sizeof(cmd.result)))
status = -EFAULT;
@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
q_count = nr_io_queues;
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, dev->queues[0]);
db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
if (db_bar_size > 8192) {
iounmap(dev->bar);
dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
db_bar_size);
dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->queues[0]->q_db = dev->dbs;
}
@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
for (;;) {
result = pci_enable_msix(dev->pci_dev, dev->entry,
nr_io_queues);
result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
if (result == 0) {
break;
} else if (result > 0) {
nr_io_queues = result;
continue;
} else {
nr_io_queues = 1;
nr_io_queues = 0;
break;
}
}
if (nr_io_queues == 0) {
nr_io_queues = q_count;
for (;;) {
result = pci_enable_msi_block(pdev, nr_io_queues);
if (result == 0) {
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].vector = i + pdev->irq;
break;
} else if (result > 0) {
nr_io_queues = result;
continue;
} else {
nr_io_queues = 1;
break;
}
}
}
result = queue_request_irq(dev, dev->queues[0], "nvme admin");
/* XXX: handle failure here */
@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_dev_remove(dev);
pci_disable_msix(dev->pci_dev);
if (dev->pci_dev->msi_enabled)
pci_disable_msi(dev->pci_dev);
else if (dev->pci_dev->msix_enabled)
pci_disable_msix(dev->pci_dev);
iounmap(dev->bar);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
else
goto disable;
result = nvme_set_instance(dev);
if (result)
goto disable;
@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
unmap:
iounmap(dev->bar);
disable_msix:
pci_disable_msix(pdev);
if (dev->pci_dev->msi_enabled)
pci_disable_msi(dev->pci_dev);
else if (dev->pci_dev->msix_enabled)
pci_disable_msix(dev->pci_dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
disable:
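
Several hunks above implement the same negotiation twice: pci_enable_msix() and pci_enable_msi_block() return 0 on success, a positive count when fewer vectors are available (retry with that count), or a negative error (fall back to the next mechanism). Here is a stubbed user-space sketch of that retry ladder; fake_enable_msix is an invented stand-in, not the PCI API.

#include <stdio.h>

/* Stub following the old PCI convention: 0 = success, >0 = only that
 * many vectors available, <0 = mechanism not supported at all. */
static int fake_enable_msix(int requested)
{
	return requested > 4 ? 4 : 0;	/* pretend hardware has 4 vectors */
}

static int setup_vectors(int wanted)
{
	int count = wanted;

	for (;;) {
		int ret = fake_enable_msix(count);

		if (ret == 0)
			return count;	/* negotiated successfully */
		if (ret > 0) {
			count = ret;	/* retry with what is available */
			continue;
		}
		return 0;	/* unavailable: caller falls back (e.g. to MSI) */
	}
}

int main(void)
{
	printf("got %d vectors\n", setup_vectors(16));	/* got 4 vectors */
	return 0;
}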

View File

@ -44,7 +44,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
}
}
static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *mode_page, u8 page_code)
{
int res = SNTI_TRANSLATION_SUCCESS;

View File

@ -83,7 +83,8 @@
#define MAX_SPEED 0xffff
#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
~(sector_t)((pd)->settings.size - 1))
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
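
The one-character ZONE() fix above is a classic integer-width trap: settings.size is narrower than sector_t, so the complement is computed at 32 bits and then zero-extended, wiping the high half of any sector beyond the 32-bit range. A self-contained demonstration, with plain uint32_t/uint64_t standing in for the kernel types:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector = 0x100000000ULL + 0x1230;	/* beyond 32 bits */
	uint32_t zone_size = 128;			/* power of two */

	/* Buggy: ~ happens at 32 bits, the mask zero-extends to
	 * 0x00000000ffffff80 and clears the sector's upper half. */
	uint64_t bad  = sector & ~(zone_size - 1);

	/* Fixed: widen the mask before complementing, as the patch does
	 * with its (sector_t) cast. */
	uint64_t good = sector & ~(uint64_t)(zone_size - 1);

	printf("bad  = %#llx\n", (unsigned long long)bad);	/* 0x1200 */
	printf("good = %#llx\n", (unsigned long long)good);	/* 0x100001200 */
	return 0;
}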

View File

@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
};
/*
* Initialize an rbd client instance.
* We own *ceph_opts.
* Initialize an rbd client instance. Success or not, this function
* consumes ceph_opts.
*/
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
/*
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
* not exist create it. Either way, ceph_opts is consumed by this
* function.
*/
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
@ -4697,8 +4698,10 @@ out:
return ret;
}
/* Undo whatever state changes are made by v1 or v2 image probe */
/*
* Undo whatever state changes are made by v1 or v2 header info
* call.
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
int tmp;
/*
* Get the id from the image id object. If it's not a
* format 2 image, we'll get ENOENT back, and we'll assume
* it's a format 1 image.
* Get the id from the image id object. Unless there's an
* error, rbd_dev->spec->image_id will be filled in with
* a dynamically-allocated string, and rbd_dev->image_format
* will be set to either 1 or 2.
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = PTR_ERR(rbdc);
goto err_out_args;
}
ceph_opts = NULL; /* rbd_dev client now owns this */
/* pick the pool */
osdc = &rbdc->client->osdc;
@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
rbd_dev->mapping.read_only = read_only;
rc = rbd_dev_device_setup(rbd_dev);
if (!rc)
return count;
if (rc) {
rbd_dev_image_release(rbd_dev);
goto err_out_module;
}
return count;
rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
err_out_args:
if (ceph_opts)
ceph_destroy_options(ceph_opts);
kfree(rbd_opts);
rbd_spec_put(spec);
err_out_module:
module_put(THIS_MODULE);

View File

@ -201,7 +201,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
Marvell Bluetooth devices, such as 8688/8787/8797.
Marvell Bluetooth devices, such as 8688/8787/8797/8897.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@ -214,7 +214,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
devices with SDIO interface. Currently SD8688/SD8787/SD8797
devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver

View File

@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_2 = 0x7a,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
.card_status = 0x50,
.sq_read_base_addr_a0 = 0x60,
.sq_read_base_addr_a1 = 0x61,
.card_revision = 0xbc,
.card_fw_status0 = 0xc0,
.card_fw_status1 = 0xc1,
.card_rx_len = 0xc2,
.card_rx_unit = 0xc3,
.io_port_0 = 0xd8,
.io_port_1 = 0xd9,
.io_port_2 = 0xda,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.sd_blksz_fw_dl = 256,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
/* Marvell SD8897 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
.driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
{ } /* Terminating entry */
};
@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");

View File

@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_dt_ids);
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{

View File

@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr = 0;
struct gtt_range *gt;
struct drm_gem_object *obj;
int ret;
int ret = 0;
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is too small\n");
return -ENOMEM;
ret = -ENOMEM;
goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
return ret;
goto unref_cursor;
}
addr = gt->offset; /* Or resource.start ??? */
@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = obj;
}
return 0;
psb_intel_crtc->cursor_obj = obj;
return ret;
unref_cursor:
drm_gem_object_unreference(obj);
return ret;
}
static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
{
struct gtt_range *gt;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->fb) {
gt = to_psb_fb(crtc->fb)->gtt;
psb_gtt_unpin(gt);
}
}
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = cdv_intel_crtc_dpms,
.mode_fixup = cdv_intel_crtc_mode_fixup,
@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.mode_set_base = cdv_intel_pipe_set_base,
.prepare = cdv_intel_crtc_prepare,
.commit = cdv_intel_crtc_commit,
.disable = cdv_intel_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {

View File

@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address;
int ret;
unsigned long pfn;
/* FIXME: assumes fb at stolen base which may not be true */
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);

View File

@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
void *tmp_dst, *tmp_src;
int ret, i, cursor_pages;
int ret = 0, i, cursor_pages;
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is too small\n");
return -ENOMEM;
ret = -ENOMEM;
goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
return ret;
goto unref_cursor;
}
if (dev_priv->ops->cursor_needs_phys) {
if (cursor_gt == NULL) {
dev_err(dev->dev, "No hardware cursor mem available");
return -ENOMEM;
ret = -ENOMEM;
goto unref_cursor;
}
/* Prevent overflow */
@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = obj;
}
return 0;
psb_intel_crtc->cursor_obj = obj;
return ret;
unref_cursor:
drm_gem_object_unreference(obj);
return ret;
}
static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
static void psb_intel_crtc_disable(struct drm_crtc *crtc)
{
struct gtt_range *gt;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->fb) {
gt = to_psb_fb(crtc->fb)->gtt;
psb_gtt_unpin(gt);
}
}
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = psb_intel_crtc_dpms,
.mode_fixup = psb_intel_crtc_mode_fixup,
@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.mode_set_base = psb_intel_pipe_set_base,
.prepare = psb_intel_crtc_prepare,
.commit = psb_intel_crtc_commit,
.disable = psb_intel_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
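
Both gma500 cursor fixes in this series have the same shape: the GEM object lookup takes a reference, so every early error return after it leaked one, and routing failures through an unref_cursor label drops it exactly once. A tiny sketch of the pattern with invented names; lookup/unref are not the DRM API.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *current_obj;	/* reference we keep on success */

static struct obj *lookup(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refs = 1;	/* lookup hands back a reference */
	return o;
}

static void unref(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

static int set_object(int fail_validation)
{
	struct obj *o = lookup();
	int ret = 0;

	if (fail_validation) {
		ret = -1;
		goto unref_obj;	/* a bare "return ret" would leak the ref */
	}

	if (current_obj)
		unref(current_obj);	/* drop the previous object */
	current_obj = o;		/* success path keeps the reference */
	return ret;

unref_obj:
	unref(o);
	return ret;
}

int main(void)
{
	printf("ok=%d\n", set_object(0));
	printf("fail=%d\n", set_object(1));
	if (current_obj)
		unref(current_obj);	/* final cleanup */
	return 0;
}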

View File

@ -1777,10 +1777,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* arranged in priority order.
*/
intel_ddc_get_modes(connector, &intel_sdvo->ddc);
if (list_empty(&connector->probed_modes) == false)
goto end;
/* Fetch modes from VBT */
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
* SDVO->LVDS transcoders can't cope with the EDID mode. Since
* drm_mode_probed_add adds the mode at the head of the list, we add it
* last.
*/
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
}
}
end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
if (intel_sdvo->hotplug_active)
intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
goto err_output;
}
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
if (intel_sdvo->hotplug_active) {
intel_encoder->hpd_pin =
intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
}
/*
* Cloning SDVO with anything is often impossible, since the SDVO
* encoder can request a special input timing mode. And even if that's

View File

@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
static void mt_free_input_name(struct hid_input *hi)
{
struct hid_device *hdev = hi->report->device;
const char *name = hi->input->name;
if (hi->input->name != hdev->name)
kfree(hi->input->name);
if (name != hdev->name) {
hi->input->name = hdev->name;
kfree(name);
}
}
static ssize_t mt_show_quirks(struct device *dev,
@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
struct hid_input *hi;
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
list_for_each_entry(hi, &hdev->inputs, list)
mt_free_input_name(hi);
hid_hw_stop(hdev);
kfree(td);
hid_set_drvdata(hdev, NULL);
}
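
The hid-multitouch fix above has two parts: free the override string before hid_hw_stop() tears the inputs down, and point input->name back at the device's default first so nothing ever reads freed memory. A minimal sketch of the restore-before-free idea; struct dev and free_name are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	const char *default_name;	/* owned elsewhere, always valid */
	const char *name;		/* may point at an override we own */
};

static void free_name(struct dev *d)
{
	const char *name = d->name;

	if (name != d->default_name) {
		d->name = d->default_name;	/* restore before freeing */
		free((char *)name);		/* safe: no dangling user left */
	}
}

int main(void)
{
	struct dev d = { .default_name = "hid-device" };

	d.name = strdup("hid-device Touchscreen");
	free_name(&d);
	printf("name is now: %s\n", d.name);	/* prints the default */
	return 0;
}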

View File

@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
if (man_id < 0 || dev_id < 0)
return -ENODEV;
if (man_id == 0x4d && dev_id == 0x01)
type_name = "max1617a";
else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
else
else if ((dev_id & 0xF0) == 0x00)
type_name = "adm1021";
else
return -ENODEV;
} else if (man_id == 0x49)
type_name = "thmc10";
else if (man_id == 0x23)
type_name = "gl523sm";
else if (man_id == 0x54)
type_name = "mc1066";
/* LM84 Mfr ID in a different place, and it has more unused bits */
else if (conv_rate == 0x00
&& (config & 0x7F) == 0x00
&& (status & 0xAB) == 0x00)
type_name = "lm84";
else
type_name = "max1617";
else {
int lte, rte, lhi, rhi, llo, rlo;
/* extra checks for LM84 and MAX1617 to avoid misdetections */
llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
/* fail if any of the additional register reads failed */
if (llo < 0 || rlo < 0)
return -ENODEV;
lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
/*
* Fail for negative temperatures and negative high limits.
* This check also catches read errors on the tested registers.
*/
if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
return -ENODEV;
/* fail if all registers hold the same value */
if (lte == rte && lte == lhi && lte == rhi && lte == llo
&& lte == rlo)
return -ENODEV;
/*
* LM84 Mfr ID is in a different place,
* and it has more unused bits.
*/
if (conv_rate == 0x00
&& (config & 0x7F) == 0x00
&& (status & 0xAB) == 0x00) {
type_name = "lm84";
} else {
/* fail if low limits are larger than high limits */
if ((s8)llo > lhi || (s8)rlo > rhi)
return -ENODEV;
type_name = "max1617";
}
}
pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
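
The detection rework above leans on one trick several times: SMBus reads return raw bytes, and casting them to s8 makes values of 0x80 and above read as negative, which real temperature and limit registers should never hold on a freshly powered chip. A small sketch of that plausibility check; plausible() and its inputs are illustrative, not the driver's exact logic.

#include <stdint.h>
#include <stdio.h>

/* Reject register contents no sane sensor would report: negative
 * temperatures or high limits, or a low limit above the high one. */
static int plausible(uint8_t temp, uint8_t high, uint8_t low)
{
	if ((int8_t)temp < 0 || (int8_t)high < 0)
		return 0;
	if ((int8_t)low > (int8_t)high)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", plausible(0x2a, 0x50, 0x00));	/* 1: 42 degrees, sane */
	printf("%d\n", plausible(0x90, 0x50, 0x00));	/* 0: temp reads as -112 */
	printf("%d\n", plausible(0x2a, 0x10, 0x46));	/* 0: low limit > high */
	return 0;
}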

View File

@ -1,7 +1,6 @@
config BCACHE
tristate "Block device as cache"
select CLOSURES
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.

View File

@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_writeback_init_cached_dev(struct cached_dev *);
void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);
void bch_cache_allocator_exit(struct cache *ca);

View File

@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
};
static KTYPE(bch_stats);
static void scale_accounting(unsigned long data);
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent)
{
kobject_init(&acc->total.kobj, &bch_stats_ktype);
kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
kobject_init(&acc->hour.kobj, &bch_stats_ktype);
kobject_init(&acc->day.kobj, &bch_stats_ktype);
closure_init(&acc->cl, parent);
init_timer(&acc->timer);
acc->timer.expires = jiffies + accounting_delay;
acc->timer.data = (unsigned long) acc;
acc->timer.function = scale_accounting;
add_timer(&acc->timer);
}
int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
struct kobject *parent)
{
@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
}
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent)
{
kobject_init(&acc->total.kobj, &bch_stats_ktype);
kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
kobject_init(&acc->hour.kobj, &bch_stats_ktype);
kobject_init(&acc->day.kobj, &bch_stats_ktype);
closure_init(&acc->cl, parent);
init_timer(&acc->timer);
acc->timer.expires = jiffies + accounting_delay;
acc->timer.data = (unsigned long) acc;
acc->timer.function = scale_accounting;
add_timer(&acc->timer);
}

View File

@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
return 0;
}
static int release_dev(struct gendisk *b, fmode_t mode)
static void release_dev(struct gendisk *b, fmode_t mode)
{
struct bcache_device *d = b->private_data;
closure_put(&d->cl);
return 0;
}
static int ioctl_dev(struct block_device *b, fmode_t mode,
@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
if (d->c)
bcache_device_detach(d);
if (d->disk)
if (d->disk && d->disk->flags & GENHD_FL_UP)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook))
return -ENOMEM;
d->disk = alloc_disk(1);
if (!d->disk)
bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1)) ||
!(q = blk_alloc_queue(GFP_KERNEL)))
return -ENOMEM;
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return -ENOMEM;
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
mutex_lock(&bch_register_lock);
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
if (atomic_read(&dc->running))
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
list_del(&dc->list);
mutex_unlock(&bch_register_lock);
if (!IS_ERR_OR_NULL(dc->bdev)) {
blk_sync_queue(bdev_get_queue(dc->bdev));
if (dc->bdev->bd_disk)
blk_sync_queue(bdev_get_queue(dc->bdev));
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
int err;
int ret;
struct io *io;
closure_init(&dc->disk.cl, NULL);
set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
struct request_queue *q = bdev_get_queue(dc->bdev);
__module_get(THIS_MODULE);
INIT_LIST_HEAD(&dc->list);
closure_init(&dc->disk.cl, NULL);
set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
err = bcache_device_init(&dc->disk, block_size);
if (err)
goto err;
spin_lock_init(&dc->io_lock);
closure_init_unlocked(&dc->sb_write);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
closure_init_unlocked(&dc->sb_write);
INIT_LIST_HEAD(&dc->io_lru);
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
INIT_LIST_HEAD(&dc->io_lru);
dc->sb_bio.bi_max_vecs = 1;
dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
list_add(&io->lru, &dc->io_lru);
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
bch_writeback_init_cached_dev(dc);
ret = bcache_device_init(&dc->disk, block_size);
if (ret)
return ret;
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
dc->disk.disk->queue->backing_dev_info.ra_pages =
max(dc->disk.disk->queue->backing_dev_info.ra_pages,
q->backing_dev_info.ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
return 0;
err:
bcache_device_stop(&dc->disk);
return err;
}
/* Cached device - bcache superblock */
static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
static void register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
struct gendisk *g;
struct cache_set *c;
if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
return err;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
g = dc->disk.disk;
bio_init(&dc->sb_bio);
dc->sb_bio.bi_max_vecs = 1;
dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
g->queue->backing_dev_info.ra_pages =
max(g->queue->backing_dev_info.ra_pages,
bdev->bd_queue->backing_dev_info.ra_pages);
bch_cached_dev_request_init(dc);
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
err = "error creating kobject";
if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
pr_info("registered backing device %s", bdevname(bdev, name));
list_add(&dc->list, &uncached_devices);
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c);
@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
bch_cached_dev_run(dc);
return NULL;
return;
err:
kobject_put(&dc->disk.kobj);
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
/*
* Return NULL instead of an error because kobject_put() cleans
* everything up
*/
return NULL;
bcache_device_stop(&dc->disk);
}
/* Flash only volumes */
@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
size_t free;
struct bucket *b;
if (!ca)
return -ENOMEM;
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
INIT_LIST_HEAD(&ca->discards);
bio_init(&ca->sb_bio);
ca->sb_bio.bi_max_vecs = 1;
ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
!init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
!(ca->buckets = vmalloc(sizeof(struct bucket) *
!(ca->buckets = vzalloc(sizeof(struct bucket) *
ca->sb.nbuckets)) ||
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
!(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
bio_split_pool_init(&ca->bio_split_hook))
goto err;
return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
@ -1766,22 +1741,28 @@ err:
return -ENOMEM;
}
static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
static void register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
if (cache_alloc(sb, ca) != 0)
return err;
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio);
ca->sb_bio.bi_max_vecs = 1;
ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
if (cache_alloc(sb, ca) != 0)
goto err;
err = "error creating kobject";
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
goto err;
pr_info("registered cache device %s", bdevname(bdev, name));
return NULL;
return;
err:
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
kobject_put(&ca->kobj);
pr_info("error opening %s: %s", bdevname(bdev, name), err);
/* Return NULL instead of an error because kobject_put() cleans
* everything up
*/
return NULL;
}
/* Global interfaces/init */
@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
sb);
if (bdev == ERR_PTR(-EBUSY))
err = "device busy";
if (IS_ERR(bdev) ||
set_blocksize(bdev, 4096))
if (IS_ERR(bdev)) {
if (bdev == ERR_PTR(-EBUSY))
err = "device busy";
goto err;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
goto err_close;
err = read_super(sb, bdev, &sb_page);
if (err)
@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
goto err_close;
err = register_bdev(sb, sb_page, bdev, dc);
register_bdev(sb, sb_page, bdev, dc);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto err_close;
err = register_cache(sb, sb_page, bdev, ca);
register_cache(sb, sb_page, bdev, ca);
}
if (err) {
/* register_(bdev|cache) will only return an error if they
* didn't get far enough to create the kobject - if they did,
* the kobject destructor will do this cleanup.
*/
out:
if (sb_page)
put_page(sb_page);
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
if (attr != &ksysfs_register_quiet)
pr_info("error opening %s: %s", path, err);
ret = -EINVAL;
}
kfree(sb);
kfree(path);
mutex_unlock(&bch_register_lock);
module_put(THIS_MODULE);
return ret;
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
if (attr != &ksysfs_register_quiet)
pr_info("error opening %s: %s", path, err);
ret = -EINVAL;
goto out;
}
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)

View File

@ -375,7 +375,7 @@ err:
refill_dirty(cl);
}
void bch_writeback_init_cached_dev(struct cached_dev *dc)
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);

View File

@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}

View File

@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
r1_bio->bios[mirror] = NULL;
to_put = bio;
set_bit(R1BIO_Uptodate, &r1_bio->state);
/*
* Do not set R1BIO_Uptodate if the current device is
* rebuilding or Faulty. This is because we cannot use
* such a device for properly reading the data back (we could
* potentially use it, if the current write would have fallen
* before rdev->recovery_offset, but for simplicity we don't
* check this here).
*/
if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
!test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(conf->mirrors[mirror].rdev,
@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
static void freeze_array(struct r1conf *conf)
static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
* wait until nr_pending match nr_queued+1
* wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
* Thus the number queued (nr_queued) plus this request (1)
* Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
raise_barrier(conf);
lower_barrier(conf);
freeze_array(conf, 0);
unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
struct md_rdev *repl =
conf->mirrors[conf->raid_disks + number].rdev;
raise_barrier(conf);
freeze_array(conf, 0);
clear_bit(Replacement, &repl->flags);
p->rdev = repl;
conf->mirrors[conf->raid_disks + number].rdev = NULL;
lower_barrier(conf);
unfreeze_array(conf);
clear_bit(WantReplacement, &rdev->flags);
} else
clear_bit(WantReplacement, &rdev->flags);
@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* frozen
*/
if (mddev->ro == 0) {
freeze_array(conf);
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
return PTR_ERR(conf);
if (mddev->queue)
blk_queue_max_write_same_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, 0);
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;
@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
return -ENOMEM;
}
raise_barrier(conf);
freeze_array(conf, 0);
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks = raid_disks;
mddev->delta_disks = 0;
lower_barrier(conf);
unfreeze_array(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);

View File

@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
set_bit(R10BIO_Uptodate, &r10_bio->state);
/*
* Do not set R10BIO_Uptodate if the current device is
* rebuilding or Faulty. This is because we cannot use
* such a device for properly reading the data back (we could
* potentially use it, if the current write would have fallen
* before rdev->recovery_offset, but for simplicity we don't
* check this here).
*/
if (test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev,
@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
wake_up(&conf->wait_barrier);
}
static void freeze_array(struct r10conf *conf)
static void freeze_array(struct r10conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
* wait until nr_pending match nr_queued+1
* wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
* Thus the number queued (nr_queued) plus this request (1)
* Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
raise_barrier(conf, 0);
lower_barrier(conf);
freeze_array(conf, 0);
unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
r10_bio->devs[slot].bio = NULL;
if (mddev->ro == 0) {
freeze_array(conf);
freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue,
mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);

View File

@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_FLUSH;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
rbi->bi_sector = (sh->sector
+ rrdev->data_offset);
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
if (mddev->major_version == 0 &&
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
*/
mddev->queue->limits.discard_zeroes_data = 0;
blk_queue_max_write_same_sectors(mddev->queue, 0);
rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);

View File

@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
flush_scheduled_work();
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
flush_scheduled_work();
mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);

View File

@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
mei_cl_unlink(ndev->cl_info);
kfree(ndev->cl_info);
}
memset(ndev, 0, sizeof(struct mei_nfc_dev));
}
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)

View File

@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
mei_clear_interrupts(dev);
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);

View File

@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
nodesperblade = 2;
else
nodesperblade = 1;
memset(&info, 0, sizeof(info));
info.cpus = num_online_cpus();
info.nodes = num_online_nodes();
info.blades = info.nodes / nodesperblade;

View File

@ -764,8 +764,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
rcu_read_lock();
read_lock(&bond->lock);
rcu_read_lock();
bond_dev = bond->dev;
@ -787,12 +787,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
if (--bond->igmp_retrans > 0)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
read_unlock(&bond->lock);
rcu_read_unlock();
/* We use curr_slave_lock to protect against concurrent access to
* igmp_retrans from multiple running instances of this function and
* bond_change_active_slave
*/
write_lock_bh(&bond->curr_slave_lock);
if (bond->igmp_retrans > 1) {
bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@ -1957,6 +1964,10 @@ err_free:
err_undo_flags:
bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
if (bond->slave_cnt == 0 &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
return res;
}

View File

@ -225,7 +225,7 @@ struct bonding {
rwlock_t curr_slave_lock;
u8 send_peer_notif;
s8 setup_by_slave;
s8 igmp_retrans;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
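
The two bonding changes above cooperate: the counter becomes unsigned, and the rearm logic becomes test-then-decrement under curr_slave_lock, so concurrent callers can no longer drive a signed 8-bit counter below zero and keep the retransmit work rescheduling forever. A user-space sketch of the guarded counter; should_requeue is an invented name for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char retrans = 3;	/* u8: can never go negative */

static int should_requeue(void)
{
	int again = 0;

	pthread_mutex_lock(&lock);
	if (retrans > 1) {	/* test first, then decrement */
		retrans--;
		again = 1;
	}
	pthread_mutex_unlock(&lock);
	return again;
}

int main(void)
{
	while (should_requeue())
		printf("requeue, %u retransmits left\n", retrans);
	return 0;
}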

View File

@ -1800,6 +1800,9 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
if (tg3_flag(tp, NO_FWARE_REPORTED))
return 0;
if (tg3_flag(tp, IS_SSB_CORE)) {
/* We don't use firmware. */
return 0;
@ -10404,6 +10407,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
*/
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
/* Chip may have been just powered on. If so, the boot code may still
* be running initialization. Wait for it to finish to avoid races in
* accessing the hardware.
*/
tg3_enable_register_access(tp);
tg3_poll_fw(tp);
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
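
The tg3_init_hw() change above is an ordering fix: right after power-on the boot code may still be initializing the chip, so the driver first enables register access and polls for firmware completion before programming anything. A stubbed sketch of a bounded readiness poll; fw_ready stands in for the real register read and is not the driver's function.

#include <stdio.h>
#include <unistd.h>

static int reads;

/* Stub register read: pretend the boot code signals completion on the
 * third poll. */
static int fw_ready(void)
{
	return ++reads >= 3;
}

static int poll_fw(int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (fw_ready())
			return 0;	/* safe to program the device now */
		usleep(1000);		/* give the boot code time to finish */
	}
	return -1;			/* timed out: do not touch the hardware */
}

int main(void)
{
	printf("%s\n", poll_fw(100) == 0 ? "firmware ready" : "timeout");
	return 0;
}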

Some files were not shown because too many files have changed in this diff.