Linux 3.15-rc4

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJTZuYPAAoJEHm+PkMAQRiGC/wIAJhgGkofnbEv4x0QEXJuNQOv
 rXxId3RQpLQ2PGjJHbrYRvwyK9X7L+JREDJGMyKh8VCMHaYQ1safpI6KII+BvkVb
 9FxcledNEB4sDNiShsckG39iBGBAsi2R630XgygLoydfrvNnCrhPB5vi1TbWE37t
 xTsaMJxZncSZ6nMsO8w5klVhSOlBPGUp9cVLZhvKTR66eF4JGX3hZHVPwlpZZd2J
 ExhxmhnwBATuP3IexRyZ/3/J/zGdJJ9e5JLlEwdLo2hKVgqVosDu7teEucLGzZL1
 YdGz7Ndl+Ky9rG/RSuZSCfG7XF5DCroqcDNhT+sqvoTWXU0iunAjFIKavy3CxNA=
 =7moZ
 -----END PGP SIGNATURE-----

Merge tag 'v3.15-rc4' into devel

Linux 3.15-rc4
Linus Walleij 2014-05-13 08:40:43 +02:00
commit 09ffa131c7
154 changed files with 1224 additions and 707 deletions

View File

@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
 - clock-frequency : The frequency of the main counter, in Hz. Optional.
+- always-on : a boolean property. If present, the timer is powered through an
+  always-on power domain, therefore it never loses context.
 
 Example:
 
	timer {
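
(For reference, a minimal sketch of a timer node carrying the new property; the compatible string, interrupt layout and frequency below are illustrative, not taken from this patch.)

	timer {
		compatible = "arm,armv8-timer";
		interrupts = <1 13 0xf08>,
			     <1 14 0xf08>,
			     <1 11 0xf08>,
			     <1 10 0xf08>;
		clock-frequency = <100000000>;
		always-on;	/* powered by an always-on power domain */
	};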

View File

@@ -24,6 +24,7 @@ Required properties:
   * "sata-phy" for the SATA 6.0Gbps PHY
 
 Optional properties:
+- dma-coherent		: Present if dma operations are coherent
 - status		: Shall be "ok" if enabled or "disabled" if disabled.
			  Default is "ok".
@@ -55,6 +56,7 @@ Example:
		       <0x0 0x1f22e000 0x0 0x1000>,
		       <0x0 0x1f227000 0x0 0x1000>;
		interrupts = <0x0 0x87 0x4>;
+		dma-coherent;
		status = "ok";
		clocks = <&sataclk 0>;
		phys = <&phy2 0>;
@@ -69,6 +71,7 @@ Example:
		       <0x0 0x1f23e000 0x0 0x1000>,
		       <0x0 0x1f237000 0x0 0x1000>;
		interrupts = <0x0 0x88 0x4>;
+		dma-coherent;
		status = "ok";
		clocks = <&sataclk 0>;
		phys = <&phy3 0>;

View File

@@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
	interrupt-names = "macirq";
	mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
	clocks = <&emac_0_clk>;
-	clocks-names = "stmmaceth";
+	clock-names = "stmmaceth";
 };

View File

@@ -33,7 +33,7 @@ Optional properties:
 - max-frame-size: See ethernet.txt file in the same directory
 - clocks: If present, the first clock should be the GMAC main clock,
   further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
 
 Examples:

View File

@@ -83,7 +83,7 @@ Example:
		reg		= <0xfe61f080 0x4>;
		reg-names	= "irqmux";
		interrupts	= <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-		interrupts-names = "irqmux";
+		interrupt-names	= "irqmux";
		ranges		= <0 0xfe610000 0x5000>;
 
		PIO0: gpio@fe610000 {
@@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
	interrupt-parent = <&PIO3>;
	#interrupt-cells = <2>;
	interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
-	interrupts-names = "card-detect";
+	interrupt-names = "card-detect";
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_mmc>;
 };

View File

@@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
	reg = <0x100000 0x3000>;
	reg-names "mpu";
	interrupts = <82>, <83>;
-	interrupts-names = "tx", "rx";
+	interrupt-names = "tx", "rx";
	op-mode = <0>;		/* MCASP_IIS_MODE */
	tdm-slots = <2>;
	serial-dir = <

View File

@@ -13,6 +13,9 @@ Required properties:
     "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
 
 - reg - <int> - I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+  DVDD-supply : power supplies for the device as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt
 
 Optional properties:
@@ -24,9 +27,6 @@ Optional properties:
         3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
	If this node is not mentioned or if the value is unknown, then
	micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
-  DVDD-supply : power supplies for the device as covered in
-  Documentation/devicetree/bindings/regulator/regulator.txt
 
 CODEC output pins:
   * HPL

View File

@@ -3485,6 +3485,12 @@ S:	Maintained
 F:	drivers/extcon/
 F:	Documentation/extcon/
 
+EXYNOS DP DRIVER
+M:	Jingoo Han <jg1.han@samsung.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/exynos/exynos_dp*
+
 EXYNOS MIPI DISPLAY DRIVERS
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Donghwa Lee <dh09.lee@samsung.com>
@@ -3550,7 +3556,7 @@ F:	include/scsi/libfcoe.h
 F:	include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
-M:	Jeff Layton <jlayton@redhat.com>
+M:	Jeff Layton <jlayton@poochiereds.net>
 M:	J. Bruce Fields <bfields@fieldses.org>
 L:	linux-fsdevel@vger.kernel.org
 S:	Maintained
@@ -5108,14 +5114,19 @@ F:	drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:	Christoffer Dall <christoffer.dall@linaro.org>
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
 W:	http://systems.cs.columbia.edu/projects/kvm-arm
 S:	Supported
 F:	arch/arm/include/uapi/asm/kvm*
 F:	arch/arm/include/asm/kvm*
 F:	arch/arm/kvm/
+F:	virt/kvm/arm/
+F:	include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:	Christoffer Dall <christoffer.dall@linaro.org>
 M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu

View File

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*

View File

@@ -614,11 +614,13 @@ resume_user_mode_begin:
 
 resume_kernel_mode:
 
-#ifdef CONFIG_PREEMPT
-
-	; This is a must for preempt_schedule_irq()
+	; Disable Interrupts from this point on
+	; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+	; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
	IRQ_DISABLE	r9
 
+#ifdef CONFIG_PREEMPT
+
	; Can't preempt if preemption disabled
	GET_CURR_THR_INFO_FROM_SP   r10
	ld	r8, [r10, THREAD_INFO_PREEMPT_COUNT]

View File

@@ -802,7 +802,7 @@
			      <0x46000000 0x400000>;
			reg-names = "mpu", "dat";
			interrupts = <80>, <81>;
-			interrupts-names = "tx", "rx";
+			interrupt-names = "tx", "rx";
			status = "disabled";
			dmas = <&edma 8>,
			       <&edma 9>;
@@ -816,7 +816,7 @@
			      <0x46400000 0x400000>;
			reg-names = "mpu", "dat";
			interrupts = <82>, <83>;
-			interrupts-names = "tx", "rx";
+			interrupt-names = "tx", "rx";
			status = "disabled";
			dmas = <&edma 10>,
			       <&edma 11>;

View File

@@ -691,7 +691,7 @@
			      <0x46000000 0x400000>;
			reg-names = "mpu", "dat";
			interrupts = <80>, <81>;
-			interrupts-names = "tx", "rx";
+			interrupt-names = "tx", "rx";
			status = "disabled";
			dmas = <&edma 8>,
			       <&edma 9>;
@@ -705,7 +705,7 @@
			      <0x46400000 0x400000>;
			reg-names = "mpu", "dat";
			interrupts = <82>, <83>;
-			interrupts-names = "tx", "rx";
+			interrupt-names = "tx", "rx";
			status = "disabled";
			dmas = <&edma 10>,
			       <&edma 11>;

View File

@@ -49,7 +49,7 @@
			reg		= <0xfe61f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfe610000 0x5000>;
 
			PIO0: gpio@fe610000 {
@@ -187,7 +187,7 @@
			reg		= <0xfee0f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfee00000 0x8000>;
 
			PIO5: gpio@fee00000 {
@@ -282,7 +282,7 @@
			reg		= <0xfe82f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfe820000 0x8000>;
 
			PIO13: gpio@fe820000 {
@@ -423,7 +423,7 @@
			reg		= <0xfd6bf080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfd6b0000 0x3000>;
 
			PIO100: gpio@fd6b0000 {
@@ -460,7 +460,7 @@
			reg		= <0xfd33f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfd330000 0x5000>;
 
			PIO103: gpio@fd330000 {

View File

@@ -53,7 +53,7 @@
			reg		= <0xfe61f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfe610000 0x6000>;
 
			PIO0: gpio@fe610000 {
@@ -201,7 +201,7 @@
			reg		= <0xfee0f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfee00000 0x10000>;
 
			PIO5: gpio@fee00000 {
@@ -333,7 +333,7 @@
			reg		= <0xfe82f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfe820000 0x6000>;
 
			PIO13: gpio@fe820000 {
@@ -461,7 +461,7 @@
			reg		= <0xfd6bf080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfd6b0000 0x3000>;
 
			PIO100: gpio@fd6b0000 {
@@ -498,7 +498,7 @@
			reg		= <0xfd33f080 0x4>;
			reg-names	= "irqmux";
			interrupts	= <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts-names = "irqmux";
+			interrupt-names	= "irqmux";
			ranges		= <0 0xfd330000 0x5000>;
 
			PIO103: gpio@fd330000 {

View File

@@ -23,7 +23,7 @@ config KVM
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select KVM_MMIO
	select KVM_ARM_HOST
-	depends on ARM_VIRT_EXT && ARM_LPAE
+	depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
	---help---
	  Support hosting virtualized guest machines. You will also
	  need to select one or more of the processor modules below.

View File

@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		kfree(boot_hyp_pgd);
+		free_pages((unsigned long)boot_hyp_pgd, pgd_order);
		boot_hyp_pgd = NULL;
	}
 
	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-	kfree(init_bounce_page);
+	free_page((unsigned long)init_bounce_page);
	init_bounce_page = NULL;
 
	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-		kfree(hyp_pgd);
+		free_pages((unsigned long)hyp_pgd, pgd_order);
		hyp_pgd = NULL;
	}
 
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
	size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
	phys_addr_t phys_base;
 
-	init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
	if (!init_bounce_page) {
		kvm_err("Couldn't allocate HYP init bounce page\n");
		err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
			 (unsigned long)phys_base);
	}
 
-	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;

View File

@@ -307,6 +307,7 @@
				      <0x0 0x1f21e000 0x0 0x1000>,
				      <0x0 0x1f217000 0x0 0x1000>;
				interrupts = <0x0 0x86 0x4>;
+				dma-coherent;
				status = "disabled";
				clocks = <&sata01clk 0>;
				phys = <&phy1 0>;
@@ -321,6 +322,7 @@
				      <0x0 0x1f22e000 0x0 0x1000>,
				      <0x0 0x1f227000 0x0 0x1000>;
				interrupts = <0x0 0x87 0x4>;
+				dma-coherent;
				status = "ok";
				clocks = <&sata23clk 0>;
				phys = <&phy2 0>;
@@ -334,6 +336,7 @@
				      <0x0 0x1f23d000 0x0 0x1000>,
				      <0x0 0x1f23e000 0x0 0x1000>;
				interrupts = <0x0 0x88 0x4>;
+				dma-coherent;
				status = "ok";
				clocks = <&sata45clk 0>;
				phys = <&phy3 0>;

View File

@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
	}
	/* no options parsing yet */
 
-	if (paddr) {
-		set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
-		early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
-	}
+	if (paddr)
+		early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
 
	printch = match->printch;
	early_console = &early_console_dev;

View File

@@ -396,7 +396,7 @@ static int __init arm64_device_init(void)
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
 }
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);

View File

@@ -22,8 +22,11 @@
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
 
 #include <asm/cacheflush.h>
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
 };
 EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
+static int dma_bus_notifier(struct notifier_block *nb,
+			    unsigned long event, void *_dev)
+{
+	struct device *dev = _dev;
+
+	if (event != BUS_NOTIFY_ADD_DEVICE)
+		return NOTIFY_DONE;
+
+	if (of_property_read_bool(dev->of_node, "dma-coherent"))
+		set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+	.notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+	.notifier_call = dma_bus_notifier,
+};
+
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
 static int __init swiotlb_late_init(void)
 {
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-	dma_ops = &coherent_swiotlb_dma_ops;
+	/*
+	 * These must be registered before of_platform_populate().
+	 */
+	bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+	bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+	dma_ops = &noncoherent_swiotlb_dma_ops;
 
	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
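
(The notifier added above keys off the standard "dma-coherent" device-tree property at device-add time; a minimal sketch of a node opting in, with illustrative node name, addresses and interrupt values:)

	sata@1a400000 {
		compatible = "apm,xgene-ahci";
		reg = <0x1a400000 0x1000>;
		interrupts = <0x0 0x87 0x4>;
		dma-coherent;	/* picked up by dma_bus_notifier(), which installs coherent_swiotlb_dma_ops */
	};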

View File

@@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
	if (pmd_none(*pmd))
		return 0;
 
+	if (pmd_sect(*pmd))
+		return pfn_valid(pmd_pfn(*pmd));
+
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

View File

@@ -1,37 +0,0 @@
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb()				barrier()
-#define read_barrier_depends()		barrier()
-#define wmb()				barrier()
-#define mb()				barrier()
-#define smp_rmb()			barrier()
-#define smp_read_barrier_depends()	barrier()
-#define smp_wmb()			barrier()
-#define smp_mb()			barrier()
-
-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */

View File

@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += resource.h
+
 header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += errno.h
@@ -13,7 +15,6 @@ header-y += msgbuf.h
 header-y += pdc.h
 header-y += posix_types.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h

View File

@@ -1,7 +0,0 @@
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX	10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif

View File

@@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
  * The buffer is put in it's own section so that tools may locate it easier.
  */
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
	__attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
	if (cmdline[0] == '\0')
-		getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+		getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
 
	printf("\n\rLinux/PowerPC load: %s", cmdline);
	/* If possible, edit the command line */
	if (console_ops.edit_cmdline)
-		console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+		console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
	printf("\n\r");
	/* Put the command line back into the devtree for the kernel */
@@ -174,7 +174,7 @@ void start(void)
	 * built-in command line wasn't set by an external tool */
	if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
		memmove(cmdline, loader_info.cmdline,
-			min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+			min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
 
	if (console_ops.open && (console_ops.open() < 0))
		exit();

View File

@@ -15,7 +15,7 @@
 #include "types.h"
 #include "string.h"
 
-#define	COMMAND_LINE_SIZE	512
+#define	BOOT_COMMAND_LINE_SIZE	2048
 #define	MAX_PATH_LEN		256
 #define	MAX_PROP_LEN		256 /* What should this be? */

View File

@@ -47,13 +47,13 @@ BSS_STACK(4096);
  * The buffer is put in it's own section so that tools may locate it easier.
  */
 
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
	__attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
	if (cmdline[0] == '\0')
-		getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+		getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
	else
		setprop_str(chosen, "bootargs", cmdline);

View File

@@ -41,14 +41,14 @@ struct opal_takeover_args {
  * size except the last one in the list to be as well.
  */
 struct opal_sg_entry {
-	void    *data;
-	long    length;
+	__be64 data;
+	__be64 length;
 };
 
-/* sg list */
+/* SG list */
 struct opal_sg_list {
-	unsigned long num_entries;
-	struct opal_sg_list *next;
+	__be64 length;
+	__be64 next;
	struct opal_sg_entry entry[];
 };
 
@@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
		      uint32_t addr, __be32 *data, uint32_t sz);
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_send_ack_elog(uint64_t log_id);
 void opal_resend_pending_logs(void);
@@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
 int64_t opal_dump_ack(uint32_t dump_id);
 int64_t opal_dump_resend_notification(void);
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
 int64_t opal_sync_host_reboot(void);
 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-		size_t length);
+		uint64_t length);
 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-		size_t length);
+		uint64_t length);
 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 
 /* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
				   int depth, void *data);
 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
				 const char *uname, int depth, void *data);
@@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
 
 extern void hvc_opal_init_early(void);
 
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
-				   int depth, void *data);
-
 extern int opal_notifier_register(struct notifier_block *nb);
 extern int opal_notifier_unregister(struct notifier_block *nb);
@@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
 extern void opal_notifier_disable(void);
 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
 
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
 extern int __opal_async_get_token(void);
 extern int opal_async_get_token_interruptible(void);
 extern int __opal_async_release_token(int token);
@@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
 
-extern void hvc_opal_init_early(void);
-
 struct rtc_time;
 extern int opal_set_rtc_time(struct rtc_time *tm);
 extern void opal_get_rtc_time(struct rtc_time *tm);
@@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+					     unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */

View File

@@ -1 +1,6 @@
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE	2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */

View File

@@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
 EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC32

View File

@@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
	if (rtas_token("ibm,update-flash-64-and-reboot") ==
		       RTAS_UNKNOWN_SERVICE) {
		pr_info("rtas_flash: no firmware flash support\n");
-		return 1;
+		return -EINVAL;
	}
 
	rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);

View File

@@ -242,6 +242,12 @@ kvm_novcpu_exit:
  */
	.globl	kvm_start_guest
 kvm_start_guest:
+
+	/* Set runlatch bit the minute you wake up from nap */
+	mfspr	r1, SPRN_CTRLF
+	ori	r1, r1, 1
+	mtspr	SPRN_CTRLT, r1
+
	ld	r2,PACATOC(r13)
 
	li	r0,KVM_HWTHREAD_IN_KVM
@@ -309,6 +315,11 @@ kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
 kvm_do_nap:
+	/* Clear the runlatch bit before napping */
+	mfspr	r2, SPRN_CTRLF
+	clrrdi	r2, r2, 1
+	mtspr	SPRN_CTRLT, r2
+
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
	/*
	 * Take a nap until a decrementer or external or doobell interrupt
-	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+	 * runlatch bit before napping.
	 */
+	mfspr	r2, SPRN_CTRLF
+	clrrdi	r2, r2, 1
+	mtspr	SPRN_CTRLT, r2
+
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR

View File

@@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
-		/* Add AVAL part */
-		if (psize != apsize) {
-			/*
-			 * MPSS, 64K base page size and 16MB parge page size
-			 * We don't need all the bits, but rest of the bits
-			 * must be ignored by the processor.
-			 * vpn cover upto 65 bits of va. (0...65) and we need
-			 * 58..64 bits of va.
-			 */
-			va |= (vpn & 0xfe);
-		}
+		/*
+		 * AVAL bits:
+		 * We don't need all the bits, but rest of the bits
+		 * must be ignored by the processor.
+		 * vpn cover upto 65 bits of va. (0...65) and we need
+		 * 58..64 bits of va.
+		 */
+		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
-		/* Add AVAL part */
-		if (psize != apsize) {
-			/*
-			 * MPSS, 64K base page size and 16MB parge page size
-			 * We don't need all the bits, but rest of the bits
-			 * must be ignored by the processor.
-			 * vpn cover upto 65 bits of va. (0...65) and we need
-			 * 58..64 bits of va.
-			 */
-			va |= (vpn & 0xfe);
-		}
+		/*
+		 * AVAL bits:
+		 * We don't need all the bits, but rest of the bits
+		 * must be ignored by the processor.
+		 * vpn cover upto 65 bits of va. (0...65) and we need
+		 * 58..64 bits of va.
+		 */
+		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");

View File

@@ -155,14 +155,26 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
	return copy_len;
 }
 
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
-					     u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+					      unsigned long version,
+					      unsigned long index)
 {
-	WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
-	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
-			virt_to_phys(page),
+	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+			phys_4096,
			version,
			index);
+
+	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
+
+	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
+			phys_4096,
+			version,
+			index);
+}
+
+static unsigned long h_get_24x7_catalog_page(char page[],
+					     u64 version, u32 index)
+{
+	return h_get_24x7_catalog_page_(virt_to_phys(page),
+					version, index);
 }
 
 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
	loff_t page_offset = 0;
-	uint32_t catalog_version_num = 0;
+	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;
	if (!page)
@@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
		goto e_free;
	}
 
-	catalog_version_num = be32_to_cpu(page_0->version);
+	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;
 
@@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			page, 4096, page_offset * 4096);
 e_free:
	if (hret)
-		pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
-				catalog_version_num, page_offset, hret);
+		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+		       " rc=%ld\n",
+		       catalog_version_num, page_offset, hret);
	kfree(page);
 
	pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@@ -243,7 +256,7 @@ e_free:					\
 static DEVICE_ATTR_RO(_name)
 
 PAGE_0_ATTR(catalog_version, "%lld\n",
-		(unsigned long long)be32_to_cpu(page_0->version));
+		(unsigned long long)be64_to_cpu(page_0->version));
 PAGE_0_ATTR(catalog_len, "%lld\n",
		(unsigned long long)be32_to_cpu(page_0->length) * 4096);
 static BIN_ATTR_RO(catalog, 0/* real length varies */);
@@ -485,13 +498,13 @@ static int hv_24x7_init(void)
	struct hv_perf_caps caps;
 
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-		pr_info("not a virtualized system, not enabling\n");
+		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}
 
	hret = hv_perf_caps_get(&caps);
	if (hret) {
-		pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
-				hret);
+		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
+			 hret);
		return -ENODEV;
	}

View File

@@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
 }
 
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
 HV_CAPS_ATTR(version, "0x%x\n");
 HV_CAPS_ATTR(ga, "%d\n");
 HV_CAPS_ATTR(expanded, "%d\n");
@@ -273,13 +273,13 @@ static int hv_gpci_init(void)
	struct hv_perf_caps caps;
 
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-		pr_info("not a virtualized system, not enabling\n");
+		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}
 
	hret = hv_perf_caps_get(&caps);
	if (hret) {
-		pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
-				hret);
+		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
+			 hret);
		return -ENODEV;
	}

View File

@@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
	.default_attrs = dump_default_attrs,
 };
 
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
-	struct opal_sg_list *sg1;
-
-	while (list) {
-		sg1 = list->next;
-		kfree(list);
-		list = sg1;
-	}
-	list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
-	struct opal_sg_list *sg1, *list = NULL;
-	void *addr;
-	int64_t size;
-
-	addr = dump->buffer;
-	size = dump->size;
-
-	sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!sg1)
-		goto nomem;
-
-	list = sg1;
-	sg1->num_entries = 0;
-	while (size > 0) {
-		/* Translate virtual address to physical address */
-		sg1->entry[sg1->num_entries].data =
-			(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-		if (size > PAGE_SIZE)
-			sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-		else
-			sg1->entry[sg1->num_entries].length = size;
-
-		sg1->num_entries++;
-		if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-			sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-			if (!sg1->next)
-				goto nomem;
-
-			sg1 = sg1->next;
-			sg1->num_entries = 0;
-		}
-		addr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-	return list;
-
-nomem:
-	pr_err("%s : Failed to allocate memory\n", __func__);
-	free_dump_sg_list(list);
-	return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
-	struct opal_sg_list *sg, *next;
-
-	for (sg = list; sg; sg = next) {
-		next = sg->next;
-
-		/* Don't translate NULL pointer for last entry */
-		if (sg->next)
-			sg->next = (struct opal_sg_list *)__pa(sg->next);
-		else
-			sg->next = NULL;
-
-		/* Convert num_entries to length */
-		sg->num_entries =
-			sg->num_entries * sizeof(struct opal_sg_entry) + 16;
-	}
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
 {
+	__be32 id, size, type;
	int rc;
-	*type = 0xffffffff;
 
-	rc = opal_dump_info2(id, size, type);
+	type = cpu_to_be32(0xffffffff);
 
+	rc = opal_dump_info2(&id, &size, &type);
	if (rc == OPAL_PARAMETER)
-		rc = opal_dump_info(id, size);
+		rc = opal_dump_info(&id, &size);
+
+	*dump_id = be32_to_cpu(id);
+	*dump_size = be32_to_cpu(size);
+	*dump_type = be32_to_cpu(type);
 
	if (rc)
		pr_warn("%s: Failed to get dump info (%d)\n",
@@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
	}
 
	/* Generate SG list */
-	list = dump_data_to_sglist(dump);
+	list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
	if (!list) {
		rc = -ENOMEM;
		goto out;
	}
 
-	/* Translate sg list addr to real address */
-	sglist_to_phy_addr(list);
-
	/* First entry address */
	addr = __pa(list);
@@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
			  __func__, dump->id);
 
	/* Free SG list */
-	free_dump_sg_list(list);
+	opal_free_sg_list(list);
 
 out:
	return rc;

View File

@@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 
 static void elog_work_fn(struct work_struct *work)
 {
-	size_t elog_size;
+	__be64 size;
+	__be64 id;
+	__be64 type;
+	uint64_t elog_size;
	uint64_t log_id;
	uint64_t elog_type;
	int rc;
	char name[2+16+1];
 
-	rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+	rc = opal_get_elog_size(&id, &size, &type);
	if (rc != OPAL_SUCCESS) {
		pr_err("ELOG: Opal log read failed\n");
		return;
	}
 
+	elog_size = be64_to_cpu(size);
+	log_id = be64_to_cpu(id);
+	elog_type = be64_to_cpu(type);
+
	BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
	if (elog_size >= OPAL_MAX_ERRLOG_SIZE)

View File

@@ -79,9 +79,6 @@
 /* XXX: Assume candidate image size is <= 1GB */
 #define MAX_IMAGE_SIZE	0x40000000
 
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
 /* Image status */
 enum {
	IMAGE_INVALID,
@@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
  */
 static inline void opal_flash_validate(void)
 {
-	struct validate_flash_t *args_buf = &validate_flash_data;
+	long ret;
+	void *buf = validate_flash_data.buf;
+	__be32 size, result;
 
-	args_buf->status = opal_validate_flash(__pa(args_buf->buf),
-					       &(args_buf->buf_size),
-					       &(args_buf->result));
+	ret = opal_validate_flash(__pa(buf), &size, &result);
+
+	validate_flash_data.status = ret;
+	validate_flash_data.buf_size = be32_to_cpu(size);
+	validate_flash_data.result = be32_to_cpu(result);
 }
 
 /*
@@ -267,94 +268,12 @@ static ssize_t manage_store(struct kobject *kobj,
	return count;
 }
 
-/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
-	struct opal_sg_list *sg1;
-	while (list) {
-		sg1 = list->next;
-		kfree(list);
-		list = sg1;
-	}
-	list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- *   -----------------------------------
- *  |  VER (8) | Entry length in bytes  |
- *   -----------------------------------
- *  |  Pointer to next entry            |
- *   -----------------------------------
- *  |  Address of memory area 1         |
- *   -----------------------------------
- *  |  Length of memory area 1          |
- *   -----------------------------------
- *  |  .........                        |
- *   -----------------------------------
- *  |  .........                        |
- *   -----------------------------------
- *  |  Address of memory area N         |
- *   -----------------------------------
- *  |  Length of memory area N          |
- *   -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
-	struct opal_sg_list *sg1, *list = NULL;
-	void *addr;
-	int size;
-
-	addr = image_data.data;
-	size = image_data.size;
-
-	sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!sg1)
-		return NULL;
-
-	list = sg1;
-	sg1->num_entries = 0;
-	while (size > 0) {
-		/* Translate virtual address to physical address */
-		sg1->entry[sg1->num_entries].data =
-			(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-		if (size > PAGE_SIZE)
-			sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-		else
-			sg1->entry[sg1->num_entries].length = size;
-
-		sg1->num_entries++;
-		if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-			sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-			if (!sg1->next) {
-				pr_err("%s : Failed to allocate memory\n",
-				       __func__);
-				goto nomem;
-			}
-
-			sg1 = sg1->next;
-			sg1->num_entries = 0;
-		}
-		addr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-	return list;
-
-nomem:
-	free_sg_list(list);
-	return NULL;
-}
-
 /*
  * OPAL update flash
  */
 static int opal_flash_update(int op)
 {
-	struct opal_sg_list *sg, *list, *next;
+	struct opal_sg_list *list;
	unsigned long addr;
	int64_t rc = OPAL_PARAMETER;
@@ -364,30 +283,13 @@ static int opal_flash_update(int op)
		goto flash;
	}
 
-	list = image_data_to_sglist();
+	list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
	if (!list)
		goto invalid_img;
 
	/* First entry address */
	addr = __pa(list);
 
-	/* Translate sg list address to absolute */
-	for (sg = list; sg; sg = next) {
-		next = sg->next;
-
-		/* Don't translate NULL pointer for last entry */
-		if (sg->next)
-			sg->next = (struct opal_sg_list *)__pa(sg->next);
-		else
-			sg->next = NULL;
-
-		/*
-		 * Convert num_entries to version/length format
-		 * to satisfy OPAL.
-		 */
-		sg->num_entries = (SG_LIST_VERSION << 56) |
-			(sg->num_entries * sizeof(struct opal_sg_entry) + 16);
-	}
-
	pr_alert("FLASH: Image is %u bytes\n", image_data.size);
	pr_alert("FLASH: Image update requested\n");
	pr_alert("FLASH: Image will be updated during system reboot\n");

View File

@@ -39,10 +39,11 @@ struct param_attr {
	struct kobj_attribute kobj_attr;
 };
 
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 {
	struct opal_msg msg;
-	int ret, token;
+	ssize_t ret;
+	int token;
 
	token = opal_async_get_token_interruptible();
	if (token < 0) {
@@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 
	ret = opal_async_wait_response(token, &msg);
	if (ret) {
-		pr_err("%s: Failed to wait for the async response, %d\n",
+		pr_err("%s: Failed to wait for the async response, %zd\n",
			__func__, ret);
		goto out_token;
	}
@@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
 {
	struct param_attr *attr = container_of(kobj_attr, struct param_attr,
			kobj_attr);
-	int ret;
+	ssize_t ret;
 
	mutex_lock(&opal_sysparam_mutex);
	ret = opal_get_sys_param(attr->param_id, attr->param_size,
@@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
 
	memcpy(buf, param_data_buf, attr->param_size);
 
+	ret = attr->param_size;
 out:
	mutex_unlock(&opal_sysparam_mutex);
-	return ret ? ret : attr->param_size;
+	return ret;
 }
 
 static ssize_t sys_param_store(struct kobject *kobj,
@@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
 {
	struct param_attr *attr = container_of(kobj_attr, struct param_attr,
			kobj_attr);
-	int ret;
+	ssize_t ret;
+
+	/* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+	if (count > MAX_PARAM_DATA_LEN)
+		count = MAX_PARAM_DATA_LEN;
 
	mutex_lock(&opal_sysparam_mutex);
	memcpy(param_data_buf, buf, count);
	ret = opal_set_sys_param(attr->param_id, attr->param_size,
			param_data_buf);
	mutex_unlock(&opal_sysparam_mutex);
-	return ret ? ret : count;
+
+	if (!ret)
+		ret = count;
+	return ret;
 }
 
 void __init opal_sys_param_init(void)
@@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
	}
 
	if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
-		pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+		pr_err("SYSPARAM: Missing property param-len in the DT\n");
		goto out_free_perm;
	}
 
	if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
-		pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+		pr_err("SYSPARAM: Missing property param-perm in the DT\n");
		goto out_free_perm;
	}
@@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
 
	/* For each of the parameters, populate the parameter attributes */
	for (i = 0; i < count; i++) {
+		if (size[i] > MAX_PARAM_DATA_LEN) {
+			pr_warn("SYSPARAM: Not creating parameter %d as size "
+				"exceeds buffer length\n", i);
+			continue;
+		}
+
		sysfs_attr_init(&attr[i].kobj_attr.attr);
		attr[i].param_id = id[i];
		attr[i].param_size = size[i];

View File

@@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
 void opal_notifier_enable(void)
 {
	int64_t rc;
-	uint64_t evt = 0;
+	__be64 evt = 0;
 
	atomic_set(&opal_notifier_hold, 0);
 
	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
-		opal_do_notifier(evt);
+		opal_do_notifier(be64_to_cpu(evt));
 }
 
 void opal_notifier_disable(void)
@@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 
	opal_handle_interrupt(virq_to_hw(irq), &events);
 
-	opal_do_notifier(events);
+	opal_do_notifier(be64_to_cpu(events));
 
	return IRQ_HANDLED;
 }
@@ -638,3 +638,66 @@ void opal_shutdown(void)
 
 /* Export this so that test modules can use it */
 EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+					     unsigned long vmalloc_size)
+{
+	struct opal_sg_list *sg, *first = NULL;
+	unsigned long i = 0;
+
+	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!sg)
+		goto nomem;
+
+	first = sg;
+
+	while (vmalloc_size > 0) {
+		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+		uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+		sg->entry[i].data = cpu_to_be64(data);
+		sg->entry[i].length = cpu_to_be64(length);
+		i++;
+
+		if (i >= SG_ENTRIES_PER_NODE) {
+			struct opal_sg_list *next;
+
+			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+			if (!next)
+				goto nomem;
+
+			sg->length = cpu_to_be64(
+					i * sizeof(struct opal_sg_entry) + 16);
+			i = 0;
+			sg->next = cpu_to_be64(__pa(next));
+			sg = next;
+		}
+
+		vmalloc_addr += length;
+		vmalloc_size -= length;
+	}
+
+	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+	return first;
+
+nomem:
+	pr_err("%s : Failed to allocate memory\n", __func__);
+	opal_free_sg_list(first);
+	return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+	while (sg) {
+		uint64_t next = be64_to_cpu(sg->next);
+
+		kfree(sg);
+
+		if (next)
+			sg = __va(next);
+		else
+			sg = NULL;
+	}
+}
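
(The dump and flash paths above both switch to these shared helpers. A minimal caller sketch under the same assumptions, passing a vmalloc'd buffer to firmware by the physical address of its first SG node; buf and image_size are illustrative names:)

	struct opal_sg_list *list;
	int64_t rc;
	void *buf = vmalloc(image_size);	/* candidate image, filled in elsewhere */

	list = opal_vmalloc_to_sg_list(buf, image_size);
	if (!list)
		return -ENOMEM;

	/* firmware takes the physical address of the first SG node */
	rc = opal_update_flash(__pa(list));

	opal_free_sg_list(list);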

View File

@@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
				pci_name(dev));
			continue;
		}
-		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
@@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 
	pe = &phb->ioda.pe_array[pdn->pe_number];
	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-	set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,

View File

@@ -162,18 +162,62 @@ static void pnv_shutdown(void)
 }
 
 #ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+	int my_cpu, i, notified = -1;
+
+	my_cpu = get_cpu();
+
+	for_each_online_cpu(i) {
+		uint8_t status;
+		int64_t rc;
+
+		if (i == my_cpu)
+			continue;
+
+		for (;;) {
+			rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+						   &status);
+			if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+				break;
+			barrier();
+			if (i != notified) {
+				printk(KERN_INFO "kexec: waiting for cpu %d "
+				       "(physical %d) to enter OPAL\n",
+				       i, paca[i].hw_cpu_id);
+				notified = i;
+			}
+		}
+	}
+}
+
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
	xics_kexec_teardown_cpu(secondary);
 
-	/* Return secondary CPUs to firmware on OPAL v3 */
-	if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+	/* On OPAL v3, we return all CPUs to firmware */
+
+	if (!firmware_has_feature(FW_FEATURE_OPALv3))
+		return;
+
+	if (secondary) {
+		/* Return secondary CPUs to firmware on OPAL v3 */
		mb();
		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
		mb();
 
		/* Return the CPU to OPAL */
		opal_return_cpu();
+	} else if (crash_shutdown) {
+		/*
+		 * On crash, we don't wait for secondaries to go
+		 * down as they might be unreachable or hung, so
+		 * instead we just wait a bit and move on.
+		 */
+		mdelay(1);
+	} else {
+		/* Primary waits for the secondaries to have reached OPAL */
+		pnv_kexec_wait_secondaries_down();
	}
 }
 #endif /* CONFIG_KEXEC */
@@ -30,6 +30,7 @@
 #include <asm/cputhreads.h>
 #include <asm/xics.h>
 #include <asm/opal.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 
@@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 	while (!generic_check_cpu_restart(cpu)) {
+		ppc64_runlatch_off();
 		power7_nap();
+		ppc64_runlatch_on();
 		if (!generic_check_cpu_restart(cpu)) {
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 			/* We may be getting an IPI, so we re-enable
@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-	struct rtas_args args = {
-		.token = cpu_to_be32(rtas_stop_self_token),
+	static struct rtas_args args = {
 		.nargs = 0,
 		.nret = 1,
 		.rets = &args.args[0],
 	};
 
+	args.token = cpu_to_be32(rtas_stop_self_token);
+
 	local_irq_disable();
 
 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
@@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 
 	start_pfn = base >> PAGE_SHIFT;
 
-	if (!pfn_valid(start_pfn)) {
-		memblock_remove(base, memblock_size);
-		return 0;
-	}
+	lock_device_hotplug();
+
+	if (!pfn_valid(start_pfn))
+		goto out;
 
 	block_sz = memory_block_size_bytes();
 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 		base += MIN_MEMORY_BLOCK_SIZE;
 	}
 
+out:
 	/* Update memory regions for memory remove */
 	memblock_remove(base, memblock_size);
+
+	unlock_device_hotplug();
 	return 0;
 }
@@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
 	return 1;
 }
 
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 {
 	u32 val;
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
 	case BPF_S_LD_W_IND:
 	case BPF_S_LD_H_IND:
 	case BPF_S_LD_B_IND:
-	case BPF_S_LDX_B_MSH:
 	case BPF_S_LD_IMM:
 	case BPF_S_LD_MEM:
 	case BPF_S_MISC_TXA:
@@ -83,7 +83,9 @@ else
         KBUILD_CFLAGS += -m64
 
         # Don't autogenerate traditional x87, MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
 	# Use -mpreferred-stack-boundary=3 if supported.
 	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
 	return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+	return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
 	int nr;
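The added hook is a simple lower clamp: dynamically allocated IRQ numbers must not land inside the GSI range reserved for ACPI-enumerated interrupts. A minimal stand-alone illustration (the GSI count is a made-up demo value):

/* Clamp demo: dynamic IRQ numbers start at or above nr_irqs_gsi. */
#include <assert.h>

static unsigned int nr_irqs_gsi = 24; /* assumption: platform GSI count */

static unsigned int dynirq_lower_bound(unsigned int from)
{
	return from < nr_irqs_gsi ? nr_irqs_gsi : from;
}

int main(void)
{
	assert(dynirq_lower_bound(0) == 24);  /* pushed past the GSI range */
	assert(dynirq_lower_bound(40) == 40); /* already above it: unchanged */
	return 0;
}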
@@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
-	if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+	/* protect rdmsrl() to handle virtualization */
+	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
 		return -1;
 
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
 	if (cap & ctl & BIT(8)) {
 		ctl &= ~BIT(8);
+
+		/* Interrupt routing set to ignore */
+		irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
 		/* Don't let users change irq affinity via procfs */
 		no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
 	/* need to update phys_pkg_id */
 	apic->phys_pkg_id = apicid_phys_pkg_id;
-	apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+	if (!irq_routing_comply)
+		apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	[number##_HIGH] = VMCS12_OFFSET(name)+4
 
 
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
 	/*
 	 * We do NOT shadow fields that are modified when L0
 	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
 	GUEST_LINEAR_ADDRESS,
 	GUEST_PHYSICAL_ADDRESS
 };
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
 	ARRAY_SIZE(shadow_read_only_fields);
 
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
 	GUEST_RIP,
 	GUEST_RSP,
 	GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
 	HOST_FS_SELECTOR,
 	HOST_GS_SELECTOR
 };
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
 	ARRAY_SIZE(shadow_read_write_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
 	}
 }
 
+static void init_vmcs_shadow_fields(void)
+{
+	int i, j;
+
+	/* No checks for read only fields yet */
+
+	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+		switch (shadow_read_write_fields[i]) {
+		case GUEST_BNDCFGS:
+			if (!vmx_mpx_supported())
+				continue;
+			break;
+		default:
+			break;
+		}
+
+		if (j < i)
+			shadow_read_write_fields[j] =
+				shadow_read_write_fields[i];
+		j++;
+	}
+	max_shadow_read_write_fields = j;
+
+	/* shadowed fields guest access without vmexit */
+	for (i = 0; i < max_shadow_read_write_fields; i++) {
+		clear_bit(shadow_read_write_fields[i],
+			  vmx_vmwrite_bitmap);
+		clear_bit(shadow_read_write_fields[i],
+			  vmx_vmread_bitmap);
+	}
+	for (i = 0; i < max_shadow_read_only_fields; i++)
+		clear_bit(shadow_read_only_fields[i],
+			  vmx_vmread_bitmap);
+}
+
 static __init int alloc_kvm_area(void)
 {
 	int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
 		enable_vpid = 0;
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
+	if (enable_shadow_vmcs)
+		init_vmcs_shadow_fields();
 
 	if (!cpu_has_vmx_ept() ||
 	    !cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-	/* shadowed read/write fields */
-	for (i = 0; i < max_shadow_read_write_fields; i++) {
-		clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
-		clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
-	}
-
-	/* shadowed read only fields */
-	for (i = 0; i < max_shadow_read_only_fields; i++)
-		clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
-
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
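init_vmcs_shadow_fields() above uses the classic two-index in-place filter: one index scans, the other compacts surviving entries toward the front, and the logical length shrinks afterwards. A self-contained sketch of that idiom with arbitrary demo data (not real VMCS field values):

/* Two-index in-place filter: keep supported entries, shrink the count. */
#include <stdio.h>

static int supported(unsigned long f)
{
	return f % 2 == 0; /* stand-in for a capability check */
}

int main(void)
{
	unsigned long fields[] = { 2, 3, 4, 5, 8 };
	int max = 5, i, j;

	for (i = j = 0; i < max; i++) {
		if (!supported(fields[i]))
			continue;              /* drop unsupported entry */
		if (j < i)
			fields[j] = fields[i]; /* compact in place */
		j++;
	}
	max = j; /* trailing slots are simply never looked at again */

	for (i = 0; i < max; i++)
		printf("%lu ", fields[i]); /* prints: 2 4 8 */
	printf("\n");
	return 0;
}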
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 	acpi_status status;
 	int ret;
 
+	if (pr->apic_id == -1)
+		return -ENODEV;
+
 	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
 		return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	}
 
 	apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-	if (apic_id < 0) {
+	if (apic_id < 0)
 		acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-		return -ENODEV;
-	}
 	pr->apic_id = apic_id;
 
 	cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
@@ -206,13 +206,13 @@ unlock:
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
 	if (state & ACPI_EC_FLAG_SCI) {
 		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-			return acpi_ec_sync_query(ec);
+			return acpi_ec_sync_query(ec, NULL);
 	}
 	return 0;
 }
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
 /*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
  * Run with locked ec mutex.
  */
 static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
 	u8 value = 0;
 
 	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-		status = acpi_ec_query_unlocked(ec, &value);
+		status = acpi_ec_sync_query(ec, &value);
 		if (status || !value)
 			break;
 	}
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
 	kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
 	u8 value = 0;
 	int status;
 	struct acpi_ec_query_handler *handler, *copy;
 
-	if ((status = acpi_ec_query_unlocked(ec, &value)))
+	status = acpi_ec_query_unlocked(ec, &value);
+	if (data)
+		*data = value;
+	if (status)
 		return status;
+
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
 			/* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 	if (!ec)
 		return;
 	mutex_lock(&ec->mutex);
-	acpi_ec_sync_query(ec);
+	acpi_ec_sync_query(ec, NULL);
 	mutex_unlock(&ec->mutex);
 }
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This functions moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them. It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
 */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
 	 * into the active list so they can be retried by the workqueue
 	 */
 	mutex_lock(&deferred_probe_mutex);
+	atomic_inc(&deferred_trigger_count);
 	list_splice_tail_init(&deferred_probe_pending_list,
 			      &deferred_probe_active_list);
 	mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
 	int ret = 0;
+	int local_trigger_count = atomic_read(&deferred_trigger_count);
 
 	atomic_inc(&probe_count);
 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
 		/* Driver requested deferred probing */
 		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
 		driver_deferred_probe_add(dev);
+		/* Did a trigger occur while probing? Need to re-trigger if yes */
+		if (local_trigger_count != atomic_read(&deferred_trigger_count))
+			driver_deferred_probe_trigger();
 	} else if (ret != -ENODEV && ret != -ENXIO) {
 		/* driver matched but the probe failed */
 		printk(KERN_WARNING
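The comment block in this hunk describes a lost-wakeup race, and the fix is a generation counter: snapshot it before the racy window, and re-trigger if it moved. A user-space sketch of just that pattern, with the driver-core locking and workqueue plumbing omitted:

/* Generation-counter sketch: re-trigger if a trigger raced with us. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int trigger_count;

static void trigger(void)
{
	/* real code also splices the pending list and kicks a workqueue */
	atomic_fetch_add(&trigger_count, 1);
}

static void probe_that_defers(void)
{
	int snapshot = atomic_load(&trigger_count);

	trigger(); /* stand-in for another device binding mid-probe */

	/* this probe "fails" with -EPROBE_DEFER and is queued as pending,
	 * so if a trigger slipped in, run deferred processing again */
	if (snapshot != atomic_load(&trigger_count))
		trigger();
}

int main(void)
{
	probe_that_defers();
	printf("trigger count: %d\n", atomic_load(&trigger_count));
	return 0;
}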
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
 		return -ENXIO;
 	return dev->archdata.irqs[num];
 #else
-	struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+	struct resource *r;
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+		return of_irq_get(dev->dev.of_node, num);
+
+	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
 	return r ? r->start : -ENXIO;
 #endif
@@ -100,6 +100,8 @@ void __init vexpress_osc_of_setup(struct device_node *node)
 	struct clk *clk;
 	u32 range[2];
 
+	vexpress_sysreg_of_early_init();
+
 	osc = kzalloc(sizeof(*osc), GFP_KERNEL);
 	if (!osc)
 		return;
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
 
 static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
 /*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
 	clk->features = CLOCK_EVT_FEAT_ONESHOT;
 
 	if (type == ARCH_CP15_TIMER) {
-		clk->features |= CLOCK_EVT_FEAT_C3STOP;
+		if (arch_timer_c3stop)
+			clk->features |= CLOCK_EVT_FEAT_C3STOP;
 		clk->name = "arch_sys_timer";
 		clk->rating = 450;
 		clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
 		}
 	}
 
+	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
 	arch_timer_register();
 	arch_timer_common_init();
 }
@@ -212,4 +212,9 @@ error_free:
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+	BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
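The hunk above is the adapter pattern: the declaration table expects a void(struct device_node *) callback, so the int-returning initializer is wrapped and its failure turned into a hard stop. A hedged user-space sketch with stand-in types (not the kernel's):

/* Adapter sketch: wrap an int-returning init in the void signature a
 * registration table expects; assert() stands in for BUG_ON(). */
#include <assert.h>
#include <stddef.h>

struct device_node { const char *name; };

static int timer_add(struct device_node *node)
{
	return node ? 0 : -1; /* stand-in for the real probe work */
}

static void timer_init(struct device_node *node) /* table's signature */
{
	assert(timer_add(node) == 0);
}

int main(void)
{
	struct device_node n = { "timer" };

	timer_init(&n);
	return 0;
}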
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
 		unsigned int table_index)
 {
 	unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
 	/* Safety precautions */
 	mult = mults[mults_index & 0x1f];
 	if (mult == -1)
-		return;
+		return -EINVAL;
+
 	speed = calc_speed(mult);
 	if ((speed > highest_speed) || (speed < lowest_speed))
-		return;
+		return -EINVAL;
+
 	/* Voltage transition before frequency transition? */
 	if (can_scale_voltage && longhaul_index < table_index)
 		dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
 	freqs.old = calc_speed(longhaul_get_cpu_mult());
 	freqs.new = speed;
 
-	cpufreq_freq_transition_begin(policy, &freqs);
-
 	pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
 			fsb, mult/10, mult%10, print_speed(speed/1000));
 retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
 			goto retry_loop;
 		}
 	}
-	/* Report true CPU frequency */
-	cpufreq_freq_transition_end(policy, &freqs, 0);
 
-	if (!bm_timeout)
+	if (!bm_timeout) {
 		printk(KERN_INFO PFX "Warning: Timeout while waiting for "
 				"idle PCI bus.\n");
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 /*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
 	unsigned int i;
 	unsigned int dir = 0;
 	u8 vid, current_vid;
+	int retval = 0;
 
 	if (!can_scale_voltage)
-		longhaul_setstate(policy, table_index);
+		retval = longhaul_setstate(policy, table_index);
 	else {
 		/* On test system voltage transitions exceeding single
 		 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
 		while (i != table_index) {
 			vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
 			if (vid != current_vid) {
-				longhaul_setstate(policy, i);
+				retval = longhaul_setstate(policy, i);
 				current_vid = vid;
 				msleep(200);
 			}
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
 			else
 				i--;
 		}
-		longhaul_setstate(policy, table_index);
+		retval = longhaul_setstate(policy, table_index);
 	}
+
 	longhaul_index = table_index;
-	return 0;
+	return retval;
 }
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
 
 	for (i = 0; i < numscales; i++) {
 		if (mults[i] == maxmult) {
+			struct cpufreq_freqs freqs;
+
+			freqs.old = policy->cur;
+			freqs.new = longhaul_table[i].frequency;
+			freqs.flags = 0;
+
+			cpufreq_freq_transition_begin(policy, &freqs);
 			longhaul_setstate(policy, i);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
 			break;
 		}
 	}
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
 static int powernow_k6_target(struct cpufreq_policy *policy,
 		unsigned int best_i)
 {
-	struct cpufreq_freqs freqs;
 
 	if (clock_ratio[best_i].driver_data > max_multiplier) {
 		printk(KERN_ERR PFX "invalid target frequency\n");
 		return -EINVAL;
 	}
 
-	freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
-	freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
-	cpufreq_freq_transition_begin(policy, &freqs);
-
 	powernow_k6_set_cpu_multiplier(best_i);
 
-	cpufreq_freq_transition_end(policy, &freqs, 0);
-
 	return 0;
 }
 
@@ -227,9 +219,20 @@ have_busfreq:
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
 	unsigned int i;
-	for (i = 0; i < 8; i++) {
-		if (i == max_multiplier)
+
+	for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+		if (clock_ratio[i].driver_data == max_multiplier) {
+			struct cpufreq_freqs freqs;
+
+			freqs.old = policy->cur;
+			freqs.new = clock_ratio[i].frequency;
+			freqs.flags = 0;
+
+			cpufreq_freq_transition_begin(policy, &freqs);
 			powernow_k6_target(policy, i);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
+			break;
+		}
 	}
 	return 0;
 }
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 
 	freqs.new = powernow_table[index].frequency;
 
-	cpufreq_freq_transition_begin(policy, &freqs);
-
 	/* Now do the magic poking into the MSRs.  */
 
 	if (have_a0 == 1)	/* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 	if (have_a0 == 1)
 		local_irq_enable();
 
-	cpufreq_freq_transition_end(policy, &freqs, 0);
-
 	return 0;
 }
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table;
 	struct cpu_data *data;
 	unsigned int cpu = policy->cpu;
+	u64 transition_latency_hz;
 
 	np = of_get_cpu_node(cpu, NULL);
 	if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for_each_cpu(i, per_cpu(cpu_mask, cpu))
 		per_cpu(cpu_data, i) = data;
 
+	transition_latency_hz = 12ULL * NSEC_PER_SEC;
 	policy->cpuinfo.transition_latency =
-		(12ULL * NSEC_PER_SEC) / fsl_get_sys_freq();
+		do_div(transition_latency_hz, fsl_get_sys_freq());
+
 	of_node_put(np);
 
 	return 0;
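The rewrite exists because a plain 64-by-32 division on 32-bit powerpc pulls in a libgcc helper (__udivdi3) the kernel does not link, and do_div() avoids that. One subtlety worth flagging: do_div(n, base) leaves the quotient in n and the expression itself evaluates to the remainder, so an assignment of its result stores the remainder rather than the quotient; double-check against upstream history if that looks surprising. A stand-alone model of the contract (not the kernel implementation):

/* do_div()-style semantics in plain C. */
#include <stdint.h>
#include <stdio.h>

static uint32_t do_div_demo(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base; /* quotient stays in the variable */
	return rem; /* the "return value" is the remainder */
}

int main(void)
{
	uint64_t latency = 12ULL * 1000000000ULL;        /* 12 * NSEC_PER_SEC */
	uint32_t rem = do_div_demo(&latency, 500000000); /* e.g. 500 MHz sysclk */

	printf("quotient %llu ns, remainder %u\n",
	       (unsigned long long)latency, rem);
	return 0;
}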
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 	plane->crtc = crtc;
 	plane->fb = crtc->primary->fb;
+	drm_framebuffer_reference(plane->fb);
 
 	return 0;
 }
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	buffer->sgt = sgt;
 	exynos_gem_obj->base.import_attach = attach;
 
-	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
 			buffer->size);
 
 	return &exynos_gem_obj->base;
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (!dsi->reg_base) {
+	if (IS_ERR(dsi->reg_base)) {
 		dev_err(&pdev->dev, "failed to remap io region\n");
-		return -EADDRNOTAVAIL;
+		return PTR_ERR(dsi->reg_base);
 	}
 
 	dsi->phy = devm_phy_get(&pdev->dev, "dsim");
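The check changed because devm_ioremap_resource() never returns NULL on failure; it returns an ERR_PTR-encoded errno, so a NULL test silently passes a bad pointer through. The kernel keeps errnos in the top, never-mapped addresses; a minimal user-space model of that convention:

/* ERR_PTR/IS_ERR model: encode an errno inside a pointer value. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = err_ptr(-12); /* say, -ENOMEM */

	if (is_err(p))
		printf("error %ld\n", ptr_err(p)); /* prints -12 */
	return 0;
}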
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 	win_data->enabled = true;
 
-	DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
 
 	if (ctx->vblank_on)
 		schedule_work(&ctx->work);
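Both %pad conversions above matter because dma_addr_t can be 64-bit even on a 32-bit kernel (e.g. LPAE), so printing it through 0x%x truncates the value and triggers format warnings; %pad takes a pointer to the variable and prints it at full width. A user-space picture of the width mismatch, assuming a 64-bit dma_addr_t:

/* Width-mismatch demo; the typedef is an assumption for illustration. */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t dma_addr_t; /* as with CONFIG_ARCH_DMA_ADDR_T_64BIT */

int main(void)
{
	dma_addr_t addr = 0x1ffffffff0ULL;

	printf("truncated: 0x%x\n", (unsigned int)addr); /* high bits lost */
	printf("full:      0x%" PRIx64 "\n", addr);      /* what %pad shows */
	return 0;
}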
@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 
 	/* Full ppgtt disabled by default for now due to issues. */
 	if (full)
-		return false; /* HAS_PPGTT(dev) */
+		return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
 	else
 		return HAS_ALIASING_PPGTT(dev);
 }
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
-		WARN_ONCE(hpd[i] & hotplug_trigger &&
-			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-			  hotplug_trigger, i, hpd[i]);
+		if (hpd[i] & hotplug_trigger &&
+		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits itself. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+				  hotplug_trigger, i, hpd[i]);
+			continue;
+		}
 
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE		(1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE	(1 << 14)
 # define MODE_IDLE			(1 << 9)
+# define STOP_RING			(1 << 8)
 
 #define GEN6_GT_MODE	0x20d0
 #define GEN7_GT_MODE	0x7008
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_I(pipe_src_w);
 	PIPE_CONF_CHECK_I(pipe_src_h);
 
-	PIPE_CONF_CHECK_I(gmch_pfit.control);
-	/* pfit ratios are autocomputed by the hw on gen4+ */
-	if (INTEL_INFO(dev)->gen < 4)
-		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+	/*
+	 * FIXME: BIOS likes to set up a cloned config with lvds+external
+	 * screen. Since we don't yet re-compute the pipe config when moving
+	 * just the lvds port away to another pipe the sw tracking won't match.
+	 *
+	 * Proper atomic modesets with recomputed global state will fix this.
+	 * Until then just don't check gmch state for inherited modes.
+	 */
+	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+		PIPE_CONF_CHECK_I(gmch_pfit.control);
+		/* pfit ratios are autocomputed by the hw on gen4+ */
+		if (INTEL_INFO(dev)->gen < 4)
+			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+	}
 
 	PIPE_CONF_CHECK_I(pch_pfit.enabled);
 	if (current_config->pch_pfit.enabled) {
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			    base.head) {
 		memset(&crtc->config, 0, sizeof(crtc->config));
 
+		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 &crtc->config);
@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
 	struct drm_connector *connector = &intel_connector->base;
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *fixed_mode = NULL;
 	bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!is_edp(intel_dp))
 		return true;
 
+	/* The VDD bit needs a power domain reference, so if the bit is already
+	 * enabled when we boot, grab this reference. */
+	if (edp_have_panel_vdd(intel_dp)) {
+		enum intel_display_power_domain power_domain;
+		power_domain = intel_display_port_power_domain(intel_encoder);
+		intel_display_power_get(dev_priv, power_domain);
+	}
+
 	/* Cache DPCD and EDID for edp. */
 	intel_edp_panel_vdd_on(intel_dp);
 	has_dpcd = intel_dp_get_dpcd(intel_dp);
@@ -236,7 +236,8 @@ struct intel_crtc_config {
 	 * tracked with quirk flags so that fastboot and state checker can act
 	 * accordingly.
 	 */
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE	(1<<1) /* mode inherited from firmware */
 	unsigned long quirks;
 
 	/* User requested mode, only valid as a starting point to
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
+	if (intel_fb &&
+	    (sizes->fb_width > intel_fb->base.width ||
+	     sizes->fb_height > intel_fb->base.height)) {
+		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+			      " releasing it\n",
+			      intel_fb->base.width, intel_fb->base.height,
+			      sizes->fb_width, sizes->fb_height);
+		drm_framebuffer_unreference(&intel_fb->base);
+		intel_fb = ifbdev->fb = NULL;
+	}
 	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
 		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
 		ret = intelfb_alloc(helper, sizes);
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 	}
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+	if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
 		return 165000;
 	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 		return 300000;
@@ -837,7 +838,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+					       true))
 		return MODE_CLOCK_HIGH;
 	if (mode->clock < 20000)
 		return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 	int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
 	int desired_bpp;
 
 	if (intel_hdmi->color_range_auto) {
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
 	I915_WRITE(HWS_PGA, addr);
 }
 
+static bool stop_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!IS_GEN2(ring->dev)) {
+		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+			return false;
+		}
+	}
+
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(ring, 0);
+
+	if (!IS_GEN2(ring->dev)) {
+		(void)I915_READ_CTL(ring);
+		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	}
+
+	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
-	u32 head;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-	/* Stop the ring if it's running. */
-	I915_WRITE_CTL(ring, 0);
-	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(ring, 0);
-	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
-
-	if (I915_NEED_GFX_HWS(dev))
-		intel_ring_setup_status_page(ring);
-	else
-		ring_setup_phys_status_page(ring);
-
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
-	/* G45 ring initialization fails to reset head to zero */
-	if (head != 0) {
+	if (!stop_ring(ring)) {
+		/* G45 ring initialization often fails to reset head to zero */
 		DRM_DEBUG_KMS("%s head not reset to zero "
 			      "ctl %08x head %08x tail %08x start %08x\n",
 			      ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 			      I915_READ_TAIL(ring),
 			      I915_READ_START(ring));
 
-		I915_WRITE_HEAD(ring, 0);
-
-		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+		if (!stop_ring(ring)) {
 			DRM_ERROR("failed to set %s head to zero "
 				  "ctl %08x head %08x tail %08x start %08x\n",
 				  ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 				  I915_READ_HEAD(ring),
 				  I915_READ_TAIL(ring),
 				  I915_READ_START(ring));
+			ret = -EIO;
+			goto out;
 		}
 	}
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
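The restructuring above factors the stop sequence into a helper, tries it once, retries once, and only then fails hard with -EIO instead of limping on. A sketch of just that control flow, with the hardware access mocked out:

/* Stop/retry/fail control-flow model of the init path above. */
#include <stdio.h>

#define EIO 5

static int attempts_needed = 2; /* pretend the hw needs a second stop */

static int stop_ring(void)
{
	return --attempts_needed <= 0; /* nonzero = head reset to zero */
}

static int init_ring(void)
{
	if (!stop_ring()) {
		/* first stop failed: log state, then retry exactly once */
		fprintf(stderr, "head not reset, retrying stop\n");
		if (!stop_ring())
			return -EIO; /* unrecoverable: bail out */
	}
	/* ... set up status page and ring registers ... */
	return 0;
}

int main(void)
{
	printf("init_ring() = %d\n", init_ring());
	return 0;
}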
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
 					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
 		} else {
 			/* disable cursor: */
-			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+					mdp4_kms->blank_cursor_iova);
 		}
 
 		/* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 
 	if (old_bo) {
 		/* drop our previous reference: */
-		msm_gem_put_iova(old_bo, mdp4_kms->id);
-		drm_gem_object_unreference_unlocked(old_bo);
+		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
 	}
 
-	crtc_flush(crtc);
 	request_pending(crtc, PENDING_CURSOR);
 
 	return 0;
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
 	VERB("status=%08x", status);
 
-	mdp_dispatch_irqs(mdp_kms, status);
-
 	for (id = 0; id < priv->num_crtcs; id++)
 		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
 			drm_handle_vblank(dev, id);
 
+	mdp_dispatch_irqs(mdp_kms, status);
+
 	return IRQ_HANDLED;
 }
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	if (mdp4_kms->blank_cursor_iova)
+		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+	if (mdp4_kms->blank_cursor_bo)
+		drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
 	kfree(mdp4_kms);
 }
 
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	mutex_lock(&dev->struct_mutex);
+	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+		mdp4_kms->blank_cursor_bo = NULL;
+		goto fail;
+	}
+
+	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+			&mdp4_kms->blank_cursor_iova);
+	if (ret) {
+		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+		goto fail;
+	}
+
 	return kms;
 
 fail:
@@ -44,6 +44,10 @@ struct mdp4_kms {
 	struct clk *lut_clk;
 
 	struct mdp_irq error_handler;
+
+	/* empty/blank cursor bo to use when cursor is "disabled" */
+	struct drm_gem_object *blank_cursor_bo;
+	uint32_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 
 	VERB("status=%08x", status);
 
-	mdp_dispatch_irqs(mdp_kms, status);
-
 	for (id = 0; id < priv->num_crtcs; id++)
 		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
 			drm_handle_vblank(dev, id);
 
+	mdp_dispatch_irqs(mdp_kms, status);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	dma_addr_t paddr;
 	int ret, size;
 
-	/* only doing ARGB32 since this is what is needed to alpha-blend
-	 * with video overlays:
-	 */
 	sizes->surface_bpp = 32;
-	sizes->surface_depth = 32;
+	sizes->surface_depth = 24;
 
 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
 			sizes->surface_height, sizes->surface_bpp,
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
 
 		if (iommu_present(&platform_bus_type))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
-		else
+		else {
 			drm_mm_remove_node(msm_obj->vram_node);
+			drm_free_large(msm_obj->pages);
+		}
 
 		msm_obj->pages = NULL;
 	}
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
 	struct drm_device *drm = crtc->dev;
 	struct drm_plane *plane;
 
-	list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+	drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
 		if (plane->crtc == crtc) {
 			tegra_plane_disable(plane);
 			plane->crtc = NULL;
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
+	SVGA3dCmdSurfaceDMASuffix *suffix;
+	uint32_t bo_size;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
+	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+					       header->size - sizeof(*suffix));
+
+	/* Make sure device and verifier stays in sync. */
+	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+		DRM_ERROR("Invalid DMA suffix size.\n");
+		return -EINVAL;
+	}
+
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 				      &cmd->dma.guest.ptr,
 				      &vmw_bo);
 	if (unlikely(ret != 0))
 		return ret;
 
+	/* Make sure DMA doesn't cross BO boundaries. */
+	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+		DRM_ERROR("Invalid DMA offset.\n");
+		return -EINVAL;
+	}
+
+	bo_size -= cmd->dma.guest.ptr.offset;
+	if (unlikely(suffix->maximumOffset > bo_size))
+		suffix->maximumOffset = bo_size;
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter, &cmd->dma.host.sid,
 				NULL);
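The added validation is the usual guest-input hardening pattern: reject an offset that already lies past the buffer, then clamp the transfer's end to what remains instead of trusting the guest-supplied maximum. A plain-C model with made-up sizes:

/* Reject-then-clamp bounds check, mirroring the hunk above. */
#include <stdint.h>
#include <stdio.h>

#define EINVAL 22

static int validate_dma(uint32_t bo_pages, uint32_t page_size,
			uint32_t offset, uint32_t *max_off)
{
	uint32_t bo_size = bo_pages * page_size;

	if (offset > bo_size)
		return -EINVAL;     /* offset outside the buffer entirely */
	bo_size -= offset;
	if (*max_off > bo_size)
		*max_off = bo_size; /* clamp rather than trust the caller */
	return 0;
}

int main(void)
{
	uint32_t max_off = 1 << 20;
	int ret = validate_dma(4, 4096, 100, &max_off);

	printf("ret=%d max_off=%u\n", ret, max_off); /* clamped to 16284 */
	return 0;
}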
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 		if (cpu_has_tjmax(c))
 			dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
 	} else {
-		val = (eax >> 16) & 0x7f;
+		val = (eax >> 16) & 0xff;
 		/*
 		 * If the TjMax is not plausible, an assumption
 		 * will be used
 		 */
-		if (val >= 85) {
+		if (val) {
 			dev_dbg(dev, "TjMax is %d degrees C\n", val);
 			return val * 1000;
 		}
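The mask widened from 0x7f to 0xff because the TjMax field appears to occupy the full byte at bits 23:16 of the MSR, and the plausibility test relaxed so that parts with a TjMax below 85 degrees are no longer rejected. A small extraction demo (the field position is taken from the hunk, not re-verified against the SDM here):

/* Bitfield extraction demo for the TjMax byte. */
#include <stdint.h>
#include <stdio.h>

static unsigned int tjmax_from_eax(uint32_t eax)
{
	return (eax >> 16) & 0xff; /* full 8-bit field, not just 7 bits */
}

int main(void)
{
	uint32_t eax = 105u << 16; /* pretend the MSR reports TjMax = 105 C */

	printf("TjMax = %u C (%u mC)\n",
	       tjmax_from_eax(eax), tjmax_from_eax(eax) * 1000);
	return 0;
}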
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-	tristate "Chelsio T4 RDMA Driver"
+	tristate "Chelsio T4/T5 RDMA Driver"
 	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
 	select GENERIC_ALLOCATOR
 	---help---
-	  This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-	  10GbE adapters.
+	  This is an iWARP/RDMA driver for the Chelsio T4 and T5
+	  1GbE, 10GbE adapters and T5 40GbE adapter.
 
 	  For general information about Chelsio and our products, visit
 	  our website at <http://www.chelsio.com>.
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
 	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
 	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	enum c4iw_qp_attr_mask mask;
 	int err;
+	int disconnect = 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * will abort the connection.
 	 */
 	if (stop_ep_timer(ep))
-		return;
+		return 0;
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * if we don't even have the mpa message, then bail.
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * We'll continue process when more data arrives.
 	 */
 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;
 
 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_NOMATCH_RTR;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 
@@ -1355,9 +1362,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_INSUFF_IRD;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 	goto out;
@@ -1366,7 +1375,7 @@ err:
 	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
-	return;
+	return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
+	int disconnect = 0;
 
 	ep = lookup_tid(t, tid);
 	if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		ep->rcv_seq += dlen;
-		process_mpa_reply(ep, skb);
+		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 			       ep->com.state, ep->hwtid, status);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		disconnect = 1;
 		break;
 	}
 	default:
 		break;
 	}
 	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (tcph->ece && tcph->cwr)
 			opt2 |= CCTRL_ECN(1);
 	}
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
 			__func__, ep, ep->hwtid, ep->com.state);
 		abort = 0;
 	}
-	mutex_unlock(&ep->com.mutex);
 	if (abort)
 		abort_connection(ep, NULL, GFP_KERNEL);
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 }
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
 	u8 ecode;
 	u16 sq_db_inc;
 	u16 rq_db_inc;
+	u8 send_term;
 };
 
 struct c4iw_qp {
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
 			ep = qhp->ep;
-			disconnect = 1;
-			c4iw_get_ep(&qhp->ep->com);
-			if (!internal)
+			if (!internal) {
+				c4iw_get_ep(&qhp->ep->com);
 				terminate = 1;
-			else {
+				disconnect = 1;
+			} else {
+				terminate = qhp->attr.send_term;
 				ret = rdma_fini(rhp, qhp, ep);
 				if (ret)
 					goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	/*
 	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
 	 * ringing the queue db when we're in DB_FULL mode.
+	 * Only allow this on T4 devices.
 	 */
 	attrs.sq_db_inc = attr->sq_psn;
 	attrs.rq_db_inc = attr->rq_psn;
 	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
 	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+		return -EINVAL;
 
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID  (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
@@ -41,6 +41,7 @@
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS   (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)   (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK   0xF

 #define ARMADA_370_XP_CPU_INTACK_OFFS       (0x44)
 #define ARMADA_375_PPI_CAUSE                (0x10)

@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
                                        struct msi_desc *desc)
 {
     struct msi_msg msg;
-    irq_hw_number_t hwirq;
-    int virq;
+    int virq, hwirq;

     hwirq = armada_370_xp_alloc_msi();
     if (hwirq < 0)

@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
                                            unsigned int irq)
 {
     struct irq_data *d = irq_get_irq_data(irq);
+    unsigned long hwirq = d->hwirq;
+
     irq_dispose_mapping(irq);
-    armada_370_xp_free_msi(d->hwirq);
+    armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+                                          int nvec, int type)
+{
+    /* We support MSI, but not MSI-X */
+    if (type == PCI_CAP_ID_MSI)
+        return 0;
+
+    return -EINVAL;
 }

 static struct irq_chip armada_370_xp_msi_irq_chip = {

@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
     msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
     msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+    msi_chip->check_device = armada_370_xp_check_msi_device;
     msi_chip->of_node = node;

     armada_370_xp_msi_domain =

@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 static int armada_xp_set_affinity(struct irq_data *d,
                                   const struct cpumask *mask_val, bool force)
 {
-    unsigned long reg;
-    unsigned long new_mask = 0;
-    unsigned long online_mask = 0;
-    unsigned long count = 0;
     irq_hw_number_t hwirq = irqd_to_hwirq(d);
+    unsigned long reg, mask;
     int cpu;

-    for_each_cpu(cpu, mask_val) {
-        new_mask |= 1 << cpu_logical_map(cpu);
-        count++;
-    }
-
-    /*
-     * Forbid mutlicore interrupt affinity
-     * This is required since the MPIC HW doesn't limit
-     * several CPUs from acknowledging the same interrupt.
-     */
-    if (count > 1)
-        return -EINVAL;
-
-    for_each_cpu(cpu, cpu_online_mask)
-        online_mask |= 1 << cpu_logical_map(cpu);
+    /* Select a single core from the affinity mask which is online */
+    cpu = cpumask_any_and(mask_val, cpu_online_mask);
+    mask = 1UL << cpu_logical_map(cpu);

     raw_spin_lock(&irq_controller_lock);
     reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-    reg = (reg & (~online_mask)) | new_mask;
+    reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
     writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
     raw_spin_unlock(&irq_controller_lock);

     return 0;

@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 #ifdef CONFIG_SMP
     armada_xp_mpic_smp_cpu_init();
-
-    /*
-     * Set the default affinity from all CPUs to the boot cpu.
-     * This is required since the MPIC doesn't limit several CPUs
-     * from acknowledging the same interrupt.
-     */
-    cpumask_clear(irq_default_affinity);
-    cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 #endif

     armada_370_xp_msi_init(node, main_int_res.start);
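
The rewritten armada_xp_set_affinity() no longer rejects multi-CPU masks: cpumask_any_and() simply picks one CPU that is both requested and online, and exactly one routing bit is written to the per-interrupt register. A userspace analog of that selection step (plain bitmasks stand in for struct cpumask, and cpu_logical_map() is taken as the identity):

#include <stdio.h>

/* Pick any CPU present in both the requested affinity and the online mask,
 * the moral equivalent of cpumask_any_and(mask_val, cpu_online_mask). */
static int any_cpu_and(unsigned long affinity, unsigned long online)
{
    unsigned long both = affinity & online;
    return both ? __builtin_ctzl(both) : -1;   /* lowest set bit, or -1 */
}

int main(void)
{
    unsigned long online = 0xF;     /* CPUs 0-3 online */
    unsigned long affinity = 0xC;   /* caller asked for CPUs 2 and 3 */
    int cpu = any_cpu_and(affinity, online);

    /* the MPIC source-control register then gets exactly one bit set */
    printf("routing irq to cpu %d (mask 0x%lx)\n", cpu, 1UL << cpu);
    return 0;
}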

--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c

@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
     int i, size, max, reserved = 0, entry;
     const __be32 *irqsr;

-    cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+    cb = kzalloc(sizeof(*cb), GFP_KERNEL);

     if (!cb)
         return -ENOMEM;
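
This crossbar fix is the classic kzalloc(sizeof(pointer)) bug: sizeof(struct cb_device *) is the size of a pointer, not of the structure, so accesses past the first few bytes landed outside the allocation. A tiny demonstration of the size mismatch (the stand-in struct layout is illustrative; the real struct cb_device lives elsewhere in the file):

#include <stdio.h>

struct cb_device {          /* stand-in with a plausible footprint */
    void *base;
    int max;
    int *irq_map;
};

int main(void)
{
    /* the buggy size vs. the intended one */
    printf("sizeof(struct cb_device *) = %zu\n", sizeof(struct cb_device *));
    printf("sizeof(struct cb_device)   = %zu\n", sizeof(struct cb_device));
    /* kzalloc(sizeof(*cb), ...) always tracks the pointed-to type,
     * which is why it is the preferred kernel idiom */
    return 0;
}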

--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c

@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
     } else {
         inc_hit_counter(cache, bio);
+        pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

         if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
             !is_dirty(cache, lookup_result.cblock))

--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c

@@ -232,6 +232,13 @@ struct thin_c {
     struct bio_list deferred_bio_list;
     struct bio_list retry_on_resume_list;
     struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+    /*
+     * Ensures the thin is not destroyed until the worker has finished
+     * iterating the active_thins list.
+     */
+    atomic_t refcount;
+    struct completion can_destroy;
 };

 /*----------------------------------------------------------------*/

@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
     blk_finish_plug(&plug);
 }

+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block.  So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+    struct thin_c *tc = NULL;
+
+    rcu_read_lock();
+    if (!list_empty(&pool->active_thins)) {
+        tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+        thin_get(tc);
+    }
+    rcu_read_unlock();
+
+    return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+    struct thin_c *old_tc = tc;
+
+    rcu_read_lock();
+    list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+        thin_get(tc);
+        thin_put(old_tc);
+        rcu_read_unlock();
+        return tc;
+    }
+    thin_put(old_tc);
+    rcu_read_unlock();
+
+    return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
     unsigned long flags;

@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
     struct bio_list bios;
     struct thin_c *tc;

-    rcu_read_lock();
-    list_for_each_entry_rcu(tc, &pool->active_thins, list)
+    tc = get_first_thin(pool);
+    while (tc) {
         process_thin_deferred_bios(tc);
-    rcu_read_unlock();
+        tc = get_next_thin(pool, tc);
+    }

     /*
      * If there are any deferred flush bios, we must commit

@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
     struct noflush_work w;

-    INIT_WORK(&w.worker, fn);
+    INIT_WORK_ONSTACK(&w.worker, fn);
     w.tc = tc;
     atomic_set(&w.complete, 0);
     init_waitqueue_head(&w.wait);

@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+    atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+    if (atomic_dec_and_test(&tc->refcount))
+        complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
     struct thin_c *tc = ti->private;
     unsigned long flags;

+    thin_put(tc);
+    wait_for_completion(&tc->can_destroy);
+
     spin_lock_irqsave(&tc->pool->lock, flags);
     list_del_rcu(&tc->list);
     spin_unlock_irqrestore(&tc->pool->lock, flags);

@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
     struct thin_c *tc;
     struct dm_dev *pool_dev, *origin_dev;
     struct mapped_device *pool_md;
+    unsigned long flags;

     mutex_lock(&dm_thin_pool_table.mutex);

@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
     mutex_unlock(&dm_thin_pool_table.mutex);

-    spin_lock(&tc->pool->lock);
+    atomic_set(&tc->refcount, 1);
+    init_completion(&tc->can_destroy);
+
+    spin_lock_irqsave(&tc->pool->lock, flags);
     list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-    spin_unlock(&tc->pool->lock);
+    spin_unlock_irqrestore(&tc->pool->lock, flags);

     /*
      * This synchronize_rcu() call is needed here otherwise we risk a
      * wake_worker() call finding no bios to process (because the newly
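
The refcount plus completion pair added to struct thin_c is a standard kernel teardown idiom: thin_ctr() starts the count at 1, the worker takes transient references via get_first_thin()/get_next_thin(), and thin_dtr() drops the initial reference and then sleeps until every transient one is gone. A compilable userspace analog of just that idiom (the completion is emulated with a mutex/condvar; the function names mirror the hunks above):

#include <pthread.h>
#include <stdatomic.h>

/* userspace stand-in for the kernel's struct completion */
struct completion {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
};

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

struct thin_c {
    atomic_int refcount;            /* the constructor sets this to 1 */
    struct completion can_destroy;  /* and initialises this not-done */
};

static void thin_get(struct thin_c *tc)
{
    atomic_fetch_add(&tc->refcount, 1);
}

static void thin_put(struct thin_c *tc)
{
    /* whoever drops the last reference wakes the destructor */
    if (atomic_fetch_sub(&tc->refcount, 1) == 1)
        complete(&tc->can_destroy);
}

static void thin_dtr(struct thin_c *tc)
{
    thin_put(tc);                           /* drop the ctor's reference */
    wait_for_completion(&tc->can_destroy);  /* wait out the worker */
    /* now it is safe to free tc */
}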

--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c

@@ -330,15 +330,17 @@ test_block_hash:
             return r;
         }
     }
+
     todo = 1 << v->data_dev_block_bits;
-    while (io->iter.bi_size) {
+    do {
         u8 *page;
+        unsigned len;
         struct bio_vec bv = bio_iter_iovec(bio, io->iter);

         page = kmap_atomic(bv.bv_page);
-        r = crypto_shash_update(desc, page + bv.bv_offset,
-                                bv.bv_len);
+        len = bv.bv_len;
+        if (likely(len >= todo))
+            len = todo;
+        r = crypto_shash_update(desc, page + bv.bv_offset, len);
         kunmap_atomic(page);

         if (r < 0) {

@@ -346,8 +348,9 @@ test_block_hash:
             return r;
         }

-        bio_advance_iter(bio, &io->iter, bv.bv_len);
-    }
+        bio_advance_iter(bio, &io->iter, len);
+        todo -= len;
+    } while (todo);

     if (!v->version) {
         r = crypto_shash_update(desc, v->salt, v->salt_size);
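
The verity change matters when a bio_vec segment spans a block boundary: hashing must stop after exactly todo bytes of the current block instead of consuming the whole segment. A runnable sketch of the clamped walk (byte counting stands in for crypto_shash_update(), and the segment array stands in for the bio iterator):

#include <stdio.h>

int main(void)
{
    /* segments of varying length, as a bio's iovecs might be */
    unsigned seg_len[] = { 512, 3000, 4096 };
    unsigned i = 0, consumed = 0;

    unsigned todo = 4096;   /* one data block: 1 << data_dev_block_bits */
    do {
        unsigned len = seg_len[i] - consumed;   /* bytes left in segment */
        if (len >= todo)
            len = todo;     /* clamp: never hash past the block boundary */

        /* crypto_shash_update(desc, page + offset, len) would go here */

        consumed += len;    /* bio_advance_iter(bio, &io->iter, len) */
        if (consumed == seg_len[i]) {
            i++;
            consumed = 0;
        }
        todo -= len;
    } while (todo);

    /* prints: stopped inside segment 2 at offset 584 */
    printf("stopped inside segment %u at offset %u\n", i, consumed);
    return 0;
}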

--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c

@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
         memset(r, 0, sizeof(*r));
         /*
-         * Get optional "interrupts-names" property to add a name
+         * Get optional "interrupt-names" property to add a name
          * to the resource.
          */
         of_property_read_string_index(dev, "interrupt-names", index,

@@ -379,6 +379,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 }
 EXPORT_SYMBOL_GPL(of_irq_to_resource);

+/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+    int rc;
+    struct of_phandle_args oirq;
+    struct irq_domain *domain;
+
+    rc = of_irq_parse_one(dev, index, &oirq);
+    if (rc)
+        return rc;
+
+    domain = irq_find_host(oirq.np);
+    if (!domain)
+        return -EPROBE_DEFER;
+
+    return irq_create_of_mapping(&oirq);
+}
+
 /**
  * of_irq_count - Count the number of IRQs a node uses
  * @dev: pointer to device tree node
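
of_irq_get() exists so that a consumer can tell "the interrupt controller has not probed yet" apart from a genuine parse failure, which is exactly what the selftest added below exercises. A hedged sketch of a consumer (foo_probe() and its device are hypothetical; of_irq_get() itself is the function added above):

#include <linux/platform_device.h>
#include <linux/of_irq.h>

/* hypothetical driver probe: defer politely until the irq domain exists */
static int foo_probe(struct platform_device *pdev)
{
    int irq = of_irq_get(pdev->dev.of_node, 0);

    if (irq == -EPROBE_DEFER)
        return irq;   /* driver core re-probes once the domain appears */
    if (irq < 0)
        return irq;   /* malformed "interrupts" property: a real error */

    /* irq is now a valid Linux irq number, ready for request_irq() */
    return 0;
}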

--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c

@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
             rc = of_address_to_resource(np, i, res);
             WARN_ON(rc);
         }
-        WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+        if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+            pr_debug("not all legacy IRQ resources mapped for %s\n",
+                     np->name);
     }

     dev->dev.of_node = of_node_get(np);

--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c

@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>

@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
     }
 }

+static void __init of_selftest_platform_populate(void)
+{
+    int irq;
+    struct device_node *np;
+    struct platform_device *pdev;
+
+    np = of_find_node_by_path("/testcase-data");
+    of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+    /* Test that a missing irq domain returns -EPROBE_DEFER */
+    np = of_find_node_by_path("/testcase-data/testcase-device1");
+    pdev = of_find_device_by_node(np);
+    if (!pdev)
+        selftest(0, "device 1 creation failed\n");
+    irq = platform_get_irq(pdev, 0);
+    if (irq != -EPROBE_DEFER)
+        selftest(0, "device deferred probe failed - %d\n", irq);
+
+    /* Test that a parsing failure does not return -EPROBE_DEFER */
+    np = of_find_node_by_path("/testcase-data/testcase-device2");
+    pdev = of_find_device_by_node(np);
+    if (!pdev)
+        selftest(0, "device 2 creation failed\n");
+    irq = platform_get_irq(pdev, 0);
+    if (irq >= 0 || irq == -EPROBE_DEFER)
+        selftest(0, "device parsing error failed - %d\n", irq);
+
+    selftest(1, "passed");
+}
+
 static int __init of_selftest(void)
 {
     struct device_node *np;

@@ -445,6 +476,7 @@ static int __init of_selftest(void)
     of_selftest_parse_interrupts();
     of_selftest_parse_interrupts_extended();
     of_selftest_match_node();
+    of_selftest_platform_populate();
     pr_info("end of selftest - %i passed, %i failed\n",
         selftest_results.passed, selftest_results.failed);
     return 0;

Some files were not shown because too many files have changed in this diff.