Merge branch 'irq/urgent' into irq/core

Merge urgent fixes so pending patches for 4.9 can be applied.
Thomas Gleixner 2016-09-20 23:20:32 +02:00
commit 464b5847e6
262 changed files with 2549 additions and 1644 deletions


@@ -10,7 +10,7 @@ Required properties:
 	       subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
 	       family).
-- clock-names:		Should be "mmc".
+- clock-names:		Should be "mmc" and "icn".  (NB: The latter is not compulsory)
 			See: Documentation/devicetree/bindings/resource-names.txt
 - clocks:		Phandle to the clock.
 			See: Documentation/devicetree/bindings/clock/clock-bindings.txt


@@ -1625,6 +1625,7 @@ N:	rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>
+R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -2485,7 +2486,7 @@ F:	include/net/bluetooth/
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <gospo@cumulusnetworks.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -3269,7 +3270,7 @@ S:	Maintained
 F:	drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ti/cpmac.c


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*


@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 	return __cu_len;
 }
 
-extern inline long
-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
-{
-	if (__access_ok((unsigned long)validate, len, get_fs()))
-		len = __copy_tofrom_user_nocheck(to, from, len);
-	return len;
-}
-
 #define __copy_to_user(to, from, n)					\
 ({									\
 	__chk_user_ptr(to);						\
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
 extern inline long
 copy_to_user(void __user *to, const void *from, long n)
 {
-	return __copy_tofrom_user((__force void *)to, from, n, to);
+	if (likely(__access_ok((unsigned long)to, n, get_fs())))
+		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+	return n;
 }
 
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
-	return __copy_tofrom_user(to, (__force void *)from, n, from);
+	if (likely(__access_ok((unsigned long)from, n, get_fs())))
+		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
+	else
+		memset(to, 0, n);
+	return n;
 }
 
 extern void __do_clear_user(void);


@@ -83,7 +83,10 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1, 0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\
@@ -101,7 +104,11 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1, 0\n"			\
+	"	mov %R1, 0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\


@@ -2,6 +2,7 @@
 
 / {
 	memory {
+		device_type = "memory";
 		reg = <0 0x10000000>;
 	};


@@ -2,7 +2,6 @@
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
 	compatible = "brcm,bcm2835";
 	model = "BCM2835";
 	interrupt-parent = <&intc>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	chosen {
 		bootargs = "earlyprintk console=ttyAMA0";


@@ -550,8 +550,9 @@
 			interrupt-names = "mmcirq";
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_mmc0>;
-			clock-names = "mmc";
-			clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+			clock-names = "mmc", "icn";
+			clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
 			bus-width = <8>;
 			non-removable;
 		};
@@ -565,8 +566,9 @@
 			interrupt-names = "mmcirq";
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_sd1>;
-			clock-names = "mmc";
-			clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+			clock-names = "mmc", "icn";
+			clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
 			resets = <&softreset STIH407_MMC1_SOFTRESET>;
 			bus-width = <4>;
 		};


@@ -41,7 +41,8 @@
 			compatible = "st,st-ohci-300x";
 			reg = <0x9a03c00 0x100>;
 			interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
 			interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
 			compatible = "st,st-ohci-300x";
 			reg = <0x9a83c00 0x100>;
 			interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
 			reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
 			interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
 			reset-names = "power", "softreset";


@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
 
 static void locomo_handler(struct irq_desc *desc)
 {
-	struct locomo *lchip = irq_desc_get_chip_data(desc);
+	struct locomo *lchip = irq_desc_get_handler_data(desc);
 	int req, i;
 
 	/* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
 	 * Install handler for IRQ_LOCOMO_HW.
 	 */
 	irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_chip_data(lchip->irq, lchip);
-	irq_set_chained_handler(lchip->irq, locomo_handler);
+	irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
 
 	/* Install handlers for IRQ_LOCOMO_* */
 	for ( ; irq <= lchip->irq_base + 3; irq++) {


@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
 	 * specifies that S0ReadyInt and S1ReadyInt should be '1'.
 	 */
 	sa1111_writel(0, irqbase + SA1111_INTPOL0);
-	sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
-		      SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+	sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+		      BIT(IRQ_S1_READY_NINT & 31),
 		      irqbase + SA1111_INTPOL1);
 
 	/* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 	if (sachip->irq != NO_IRQ) {
 		ret = sa1111_setup_irq(sachip, pd->irq_base);
 		if (ret)
-			goto err_unmap;
+			goto err_clk;
 	}
 
 #ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
 
 	return 0;
 
+ err_clk:
+	clk_disable(sachip->clk);
  err_unmap:
 	iounmap(sachip->base);
  err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {
 
 #ifdef CONFIG_PM
 
-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
 {
-	struct sa1111 *sachip = platform_get_drvdata(dev);
+	struct sa1111 *sachip = dev_get_drvdata(dev);
 	struct sa1111_save_data *save;
 	unsigned long flags;
 	unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
  * restored by their respective drivers, and must be called
  * via LDM after this function.
  */
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
 {
-	struct sa1111 *sachip = platform_get_drvdata(dev);
+	struct sa1111 *sachip = dev_get_drvdata(dev);
 	struct sa1111_save_data *save;
 	unsigned long flags, id;
 	void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
 	id = sa1111_readl(sachip->base + SA1111_SKID);
 	if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
 		__sa1111_remove(sachip);
-		platform_set_drvdata(dev, NULL);
+		dev_set_drvdata(dev, NULL);
 		kfree(save);
 		return 0;
 	}
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
 }
 
 #else
-#define sa1111_suspend NULL
-#define sa1111_resume  NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq  NULL
 #endif
 
 static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
 		return -EINVAL;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENXIO;
+		return irq;
 
 	return __sa1111_probe(&pdev->dev, mem, irq);
 }
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static struct dev_pm_ops sa1111_pm_ops = {
+	.suspend_noirq = sa1111_suspend_noirq,
+	.resume_noirq = sa1111_resume_noirq,
+};
+
 /*
  * Not sure if this should be on the system bus or not yet.
  * We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
 static struct platform_driver sa1111_device_driver = {
 	.probe		= sa1111_probe,
 	.remove		= sa1111_remove,
-	.suspend	= sa1111_suspend,
-	.resume		= sa1111_resume,
 	.driver		= {
 		.name	= "sa1111",
+		.pm	= &sa1111_pm_ops,
 	},
 };


@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y


@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
 CONFIG_DMA_BCM2835=y
 CONFIG_DMA_OMAP=y
 CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
 CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y


@@ -47,6 +47,7 @@
 #define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2))
 
 /*


@@ -62,6 +62,7 @@
 #define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
 #define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
 #define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK	(_AT(pmdval_t, 7) << 2)
 
 /*
  * + Level 3 descriptor (PTE)


@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
-	kvm_free_stage2_pgd(kvm);
-
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);


@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
 		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
 	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
-	    hyp_idmap_start <  kern_hyp_va(~0UL)) {
+	    hyp_idmap_start <  kern_hyp_va(~0UL) &&
+	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
 		/*
 		 * The idmap page is intersecting with the VA space,
 		 * it is not safe to continue further.
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
+	kvm_free_stage2_pgd(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,


@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
 		return -ENOMEM;
 	}
 
+	/*
+	 * Clear the OF_POPULATED flag set in of_irq_init so that
+	 * later the Exynos PMU platform device won't be skipped.
+	 */
+	of_node_clear_flag(node, OF_POPULATED);
+
 	return 0;
 }


@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
 	// no D+ pullup; lubbock can't connect/disconnect in software
 };
 
+static void lubbock_init_pcmcia(void)
+{
+	struct clk *clk;
+
+	/* Add an alias for the SA1111 PCMCIA clock */
+	clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+	if (!IS_ERR(clk)) {
+		clkdev_create(clk, NULL, "1800");
+		clk_put(clk);
+	}
+}
+
 static struct resource sa1111_resources[] = {
 	[0] = {
 		.start	= 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
 	pxa_set_btuart_info(NULL);
 	pxa_set_stuart_info(NULL);
 
+	lubbock_init_pcmcia();
+
 	clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
 	pxa_set_udc_info(&udc_info);
 	pxa_set_fb_info(NULL, &sharp_lm8v31);


@@ -41,40 +41,27 @@
 
 #define REGULATOR_IRQ_MASK	BIT(2)	/* IRQ2, active low */
 
+/* start of DA9210 System Control and Event Registers */
+#define DA9210_REG_MASK_A	0x54
+
 static void __iomem *irqc;
 
-static const u8 da9063_mask_regs[] = {
-	DA9063_REG_IRQ_MASK_A,
-	DA9063_REG_IRQ_MASK_B,
-	DA9063_REG_IRQ_MASK_C,
-	DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
-#define DA9210_REG_MASK_A	0x54
-#define DA9210_REG_MASK_B	0x55
-
-static const u8 da9210_mask_regs[] = {
-	DA9210_REG_MASK_A,
-	DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
-			     unsigned int nregs)
-{
-	unsigned int i;
-
-	dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
-
-	for (i = 0; i < nregs; i++) {
-		int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
-		if (error) {
-			dev_err(&client->dev, "i2c error %d\n", error);
-			return;
-		}
-	}
-}
+/* first byte sets the memory pointer, following are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+	{
+		.addr = 0x58,
+		.len = ARRAY_SIZE(da9063_irq_clr),
+		.buf = da9063_irq_clr,
+	}, {
+		.addr = 0x68,
+		.len = ARRAY_SIZE(da9210_irq_clr),
+		.buf = da9210_irq_clr,
+	},
+};
 
 static int regulator_quirk_notify(struct notifier_block *nb,
 				  unsigned long action, void *data)
 {
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
 	client = to_i2c_client(dev);
 	dev_dbg(dev, "Detected %s\n", client->name);
 
-	if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
-		da9xxx_mask_irqs(client, da9063_mask_regs,
-				 ARRAY_SIZE(da9063_mask_regs));
-	else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
-		da9xxx_mask_irqs(client, da9210_mask_regs,
-				 ARRAY_SIZE(da9210_mask_regs));
+	if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+	    (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+		int ret;
+
+		dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+		ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+		if (ret != ARRAY_SIZE(da9xxx_msgs))
+			dev_err(&client->dev, "i2c error %d\n", ret);
+	}
 
 	mon = ioread32(irqc + IRQC_MONITOR);
 	if (mon & REGULATOR_IRQ_MASK)


@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
 
 	initial_pmd_value = pmd;
 
-	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+	pmd &= PMD_SECT_CACHE_MASK;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
 		if (cache_policies[i].pmd == pmd) {


@@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-	/* Direct vCPU id mapping for ARM guests. */
-	per_cpu(xen_vcpu_id, cpu) = cpu;
-
 	info.mfn = virt_to_gfn(vcpup);
 	info.offset = xen_offset_in_page(vcpup);
@@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
 {
 	struct xen_add_to_physmap xatp;
 	struct shared_info *shared_info_page = NULL;
+	int cpu;
 
 	if (!xen_domain())
 		return 0;
@@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
 		return -ENOMEM;
 
 	/* Direct vCPU id mapping for ARM guests. */
-	per_cpu(xen_vcpu_id, 0) = 0;
+	for_each_possible_cpu(cpu)
+		per_cpu(xen_vcpu_id, cpu) = cpu;
 
 	xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
 	if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,


@@ -255,10 +255,10 @@
 		/* Local timer */
 		timer {
 			compatible = "arm,armv8-timer";
-			interrupts = <1 13 0xf01>,
-				     <1 14 0xf01>,
-				     <1 11 0xf01>,
-				     <1 10 0xf01>;
+			interrupts = <1 13 0xf08>,
+				     <1 14 0xf08>,
+				     <1 11 0xf08>,
+				     <1 10 0xf08>;
 		};
 
 		timer0: timer0@ffc03000 {


@@ -102,13 +102,13 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 10
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
 	};
 
 	xtal: xtal-clk {


@@ -110,10 +110,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 0 0xff01>,	/* Secure Phys IRQ */
-			     <1 13 0xff01>,	/* Non-secure Phys IRQ */
-			     <1 14 0xff01>,	/* Virt IRQ */
-			     <1 15 0xff01>;	/* Hyp IRQ */
+		interrupts = <1 0 0xff08>,	/* Secure Phys IRQ */
+			     <1 13 0xff08>,	/* Non-secure Phys IRQ */
+			     <1 14 0xff08>,	/* Virt IRQ */
+			     <1 15 0xff08>;	/* Hyp IRQ */
 		clock-frequency = <50000000>;
 	};


@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi


@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
 
 / {
 	compatible = "raspberrypi,3-model-b", "brcm,bcm2837";


@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
 
 / {
 	compatible = "brcm,bcm2836";


@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi


@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi


@@ -88,13 +88,13 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>;
+			      IRQ_TYPE_LEVEL_LOW)>;
 	};
 
 	pmu {


@@ -354,10 +354,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};
 
 	pmu {


@@ -473,10 +473,10 @@
 		timer {
 			compatible = "arm,armv8-timer";
-			interrupts = <1 13 0xff01>,
-				     <1 14 0xff01>,
-				     <1 11 0xff01>,
-				     <1 10 0xff01>;
+			interrupts = <1 13 0xff08>,
+				     <1 14 0xff08>,
+				     <1 11 0xff08>,
+				     <1 10 0xff08>;
 		};
 
 		pmu_system_controller: system-controller@105c0000 {


@@ -119,10 +119,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x1>, /* Physical Secure PPI */
-			     <1 14 0x1>, /* Physical Non-Secure PPI */
-			     <1 11 0x1>, /* Virtual PPI */
-			     <1 10 0x1>; /* Hypervisor PPI */
+		interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+			     <1 14 0xf08>, /* Physical Non-Secure PPI */
+			     <1 11 0xf08>, /* Virtual PPI */
+			     <1 10 0xf08>; /* Hypervisor PPI */
 	};
 
 	pmu {


@@ -191,10 +191,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-			     <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-			     <1 11 0x8>, /* Virtual PPI, active-low */
-			     <1 10 0x8>; /* Hypervisor PPI, active-low */
+		interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+			     <1 14 4>, /* Physical Non-Secure PPI, active-low */
+			     <1 11 4>, /* Virtual PPI, active-low */
+			     <1 10 4>; /* Hypervisor PPI, active-low */
 	};
 
 	pmu {


@@ -122,10 +122,10 @@
 			timer {
 				compatible = "arm,armv8-timer";
-				interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-					     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-					     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-					     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+				interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+					     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+					     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+					     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 			};
 
 			odmi: odmi@300000 {


@@ -129,10 +129,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};
 
 	soc {


@@ -65,10 +65,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupt-parent = <&gic>;
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 0xf08>,
+			     <1 14 0xf08>,
+			     <1 11 0xf08>,
+			     <1 10 0xf08>;
 	};
 
 	amba_apu {


@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
 
 extern __kernel_size_t copy_to_user(void __user *to, const void *from,
 				    __kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
 				      __kernel_size_t n);
 
 static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
 {
 	return __copy_user(to, (const void __force *)from, n);
 }
 
+static inline __kernel_size_t copy_from_user(void *to,
+					     const void __user *from,
+					     __kernel_size_t n)
+{
+	size_t res = ___copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user


@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);


@@ -23,13 +23,13 @@
 	 */
 	.text
 	.align	1
-	.global	copy_from_user
-	.type	copy_from_user, @function
-copy_from_user:
+	.global	___copy_from_user
+	.type	___copy_from_user, @function
+___copy_from_user:
 	branch_if_kernel r8, __copy_user
 	ret_if_privileged r8, r11, r10, r10
 	rjmp	__copy_user
-	.size	copy_from_user, . - copy_from_user
+	.size	___copy_from_user, . - ___copy_from_user
 
 	.global	copy_to_user
 	.type	copy_to_user, @function


@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
+	if (likely(access_ok(VERIFY_READ, from, n))) {
 		memcpy(to, (const void __force *)from, n);
-	else
-		return n;
-	return 0;
+		return 0;
+	}
+	memset(to, 0, n);
+	return n;
 }
 
 static inline unsigned long __must_check


@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user_zeroing(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __do_clear_user(to, n);
-	return n;
-}
-
 static inline long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_from_user_24(to, from, ret);
 	else
-		ret = __generic_copy_from_user(to, from, n);
+		ret = __copy_user_zeroing(to, from, n);
 
 	return ret;
 }
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_to_user_24(to, from, ret);
 	else
-		ret = __generic_copy_to_user(to, from, n);
+		ret = __copy_user(to, from, n);
 
 	return ret;
 }
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
 	else if (n == 24)
 		__asm_clear_24(to, ret);
 	else
-		ret = __generic_clear_user(to, n);
+		ret = __do_clear_user(to, n);
 
 	return ret;
 }
 
-#define clear_user(to, n)			\
-	(__builtin_constant_p(n) ?		\
-	 __constant_clear_user(to, n) :		\
-	 __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_clear_user(to, n);
+	else
+		return __do_clear_user(to, n);
+}
 
-#define copy_from_user(to, from, n)		\
-	(__builtin_constant_p(n) ?		\
-	 __constant_copy_from_user(to, from, n) :	\
-	 __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+		memset(to, 0, n);
+		return n;
+	}
+	if (__builtin_constant_p(n))
+		return __constant_copy_from_user(to, from, n);
+	else
+		return __copy_user_zeroing(to, from, n);
+}
 
-#define copy_to_user(to, from, n)		\
-	(__builtin_constant_p(n) ?		\
-	 __constant_copy_to_user(to, from, n) :	\
-	 __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_copy_to_user(to, from, n);
+	else
+		return __copy_user(to, from, n);
+}
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.


@@ -263,19 +263,25 @@ do {							\
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
-#define clear_user(dst,count)			__memset_user(____force(dst), (count))
+#define __clear_user(dst,count)			__memset_user(____force(dst), (count))
 #define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
 
 #else
 
-#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
 #define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
 #define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+	if (likely(__access_ok(to, n)))
+		n = __clear_user(to, n);
+	return n;
+}
 
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)


@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 {
 	long res = __strnlen_user(src, n);
 
-	/* return from strnlen can't be zero -- that would be rubbish. */
+	if (unlikely(!res))
+		return -EFAULT;
 
 	if (res > n) {
 		copy_from_user(dst, src, n);


@@ -269,19 +269,16 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	__cu_len;									\
 })
 
-#define copy_from_user(to, from, n)							\
-({											\
-	void *__cu_to = (to);								\
-	const void __user *__cu_from = (from);						\
-	long __cu_len = (n);								\
-											\
-	__chk_user_ptr(__cu_from);							\
-	if (__access_ok(__cu_from, __cu_len, get_fs())) {				\
-		check_object_size(__cu_to, __cu_len, false);				\
-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
-	}										\
-	__cu_len;									\
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	if (likely(__access_ok(from, n, get_fs())))
+		n = __copy_user((__force void __user *) to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
 
 #define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))


@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #define __get_user_nocheck(x, ptr, size)				\
 ({									\
 	long __gu_err = 0;						\
-	unsigned long __gu_val;						\
+	unsigned long __gu_val = 0;					\
 	might_fault();							\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err);		\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\


@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
+	if (likely(access_ok(VERIFY_READ, from, n)))
 		return __copy_user_zeroing(to, from, n);
+	memset(to, 0, n);
 	return n;
 }


@@ -227,7 +227,7 @@ extern long __user_bad(void);
 
 #define __get_user(x, ptr)						\
 ({									\
-	unsigned long __gu_val;						\
+	unsigned long __gu_val = 0;					\
 	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
 	long __gu_err;							\
 	switch (sizeof(*(ptr))) {					\
@@ -373,10 +373,13 @@ extern long __user_bad(void);
 static inline long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	might_fault();
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_from_user(to, from, n);
-	return n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 #define __copy_to_user(to, from, n)	\


@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <linux/string.h>
 #include <asm/asm-eva.h>
 
 /*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 			__cu_len = __invoke_copy_from_user(__cu_to,	\
 							   __cu_from,	\
 							   __cu_len);	\
+		} else {						\
+			memset(__cu_to, 0, __cu_len);			\
 		}							\
 	}								\
 	__cu_len;							\


@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
 		"2:\n"						\
 		"	.section	.fixup,\"ax\"\n"	\
 		"3:\n\t"					\
+		"	mov		0,%1\n"			\
 		"	mov		%3,%0\n"		\
 		"	jmp		2b\n"			\
 		"	.previous\n"				\


@@ -9,7 +9,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 unsigned long
 __generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
 		__copy_user_zeroing(to, from, n);
+	else
+		memset(to, 0, n);
 	return n;
 }


@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
 static inline long copy_from_user(void *to, const void __user *from,
 				unsigned long n)
 {
-	if (!access_ok(VERIFY_READ, from, n))
-		return n;
-	return __copy_from_user(to, from, n);
+	unsigned long res = n;
+	if (access_ok(VERIFY_READ, from, n))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
 
 #define __get_user_unknown(val, size, ptr, err) do {			\
 	err = 0;							\
-	if (copy_from_user(&(val), ptr, size)) {			\
+	if (__copy_from_user(&(val), ptr, size)) {			\
 		err = -EFAULT;						\
 	}								\
 	} while (0)
@@ -166,7 +169,7 @@ do {									\
 ({									\
 	long __gu_err = -EFAULT;					\
 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
-	unsigned long __gu_val;						\
+	unsigned long __gu_val = 0;					\
 	__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
 	(x) = (__force __typeof__(x))__gu_val;				\
 	__gu_err;							\


@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_tofrom_user(to, from, n);
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + n - TASK_SIZE;
-		return __copy_tofrom_user(to, from, n - over) + over;
-	}
-	return n;
+	unsigned long res = n;
+
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_tofrom_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_tofrom_user(to, from, n);
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + n - TASK_SIZE;
-		return __copy_tofrom_user(to, from, n - over) + over;
-	}
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_tofrom_user(to, from, n);
 	return n;
 }
 
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
 static inline __must_check unsigned long
 clear_user(void *addr, unsigned long size)
 {
-
-	if (access_ok(VERIFY_WRITE, addr, size))
-		return __clear_user(addr, size);
-	if ((unsigned long)addr < TASK_SIZE) {
-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-		return __clear_user(addr, size - over) + over;
-	}
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		size = __clear_user(addr, size);
 	return size;
 }


@@ -10,6 +10,7 @@
 #include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
+#include <linux/string.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -221,7 +222,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
                                           unsigned long n)
 {
         int sz = __compiletime_object_size(to);
-        int ret = -EFAULT;
+        unsigned long ret = n;
 
         if (likely(sz == -1 || sz >= n))
                 ret = __copy_from_user(to, from, n);
@@ -230,6 +231,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
         else
                 __bad_copy_user();
 
+        if (unlikely(ret))
+                memset(to + (n - ret), 0, ret);
         return ret;
 }


@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
 #include <linux/jump_label.h>
 
-#define NUM_CPU_FTR_KEYS	64
+#define NUM_CPU_FTR_KEYS	BITS_PER_LONG
 
 extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];


@@ -308,36 +308,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_READ, from, n)) {
+	if (likely(access_ok(VERIFY_READ, from, n))) {
 		check_object_size(to, n, false);
 		return __copy_tofrom_user((__force void __user *)to, from, n);
 	}
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + n - TASK_SIZE;
-		check_object_size(to, n - over, false);
-		return __copy_tofrom_user((__force void __user *)to, from,
-				n - over) + over;
-	}
+	memset(to, 0, n);
 	return n;
 }
 
 static inline unsigned long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
-	unsigned long over;
-
 	if (access_ok(VERIFY_WRITE, to, n)) {
 		check_object_size(from, n, true);
 		return __copy_tofrom_user(to, (__force void __user *)from, n);
 	}
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + n - TASK_SIZE;
-		check_object_size(from, n - over, true);
-		return __copy_tofrom_user(to, (__force void __user *)from,
-				n - over) + over;
-	}
 	return n;
 }
 
@@ -434,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
 	might_fault();
 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
 		return __clear_user(addr, size);
-	if ((unsigned long)addr < TASK_SIZE) {
-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-		return __clear_user(addr, size - over) + over;
-	}
 	return size;
 }


@@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  *
  * r13 - PACA
  * cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
  */
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
@@ -453,7 +453,7 @@ lwarx_loop2:
 	 * At this stage
 	 * cr2 - eq if first thread to wakeup in core
 	 * cr3-  gt if waking up with partial/complete hypervisor state loss
-	 * cr4 - eq if waking up from complete hypervisor state loss.
+	 * cr4 - gt or eq if waking up from complete hypervisor state loss.
 	 */
 
 	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -481,7 +481,7 @@ first_thread_in_subcore:
 	 * If waking up from sleep, subcore state is not lost. Hence
 	 * skip subcore state restore
 	 */
-	bne	cr4,subcore_state_restored
+	blt	cr4,subcore_state_restored
 
 	/* Restore per-subcore state */
 	ld	r4,_SDR1(r1)
@@ -526,7 +526,7 @@ timebase_resync:
 	 * If waking up from sleep, per core state is not lost, skip to
 	 * clear_lock.
 	 */
-	bne	cr4,clear_lock
+	blt	cr4,clear_lock
 
 	/*
 	 * First thread in the core to wake up and its waking up with
@@ -557,7 +557,7 @@ common_exit:
 	 * If waking up from sleep, hypervisor state is not lost. Hence
 	 * skip hypervisor state restore.
 	 */
-	bne	cr4,hypervisor_state_restored
+	blt	cr4,hypervisor_state_restored
 
 	/* Waking up from winkle */


@@ -2217,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
 	pnv_pci_link_table_and_group(phb->hose->node, num,
 			tbl, &pe->table_group);
-	pnv_pci_phb3_tce_invalidate_pe(pe);
+	pnv_pci_ioda2_tce_invalidate_pe(pe);
 
 	return 0;
 }
@@ -2355,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
 	if (ret)
 		pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
 	else
-		pnv_pci_phb3_tce_invalidate_pe(pe);
+		pnv_pci_ioda2_tce_invalidate_pe(pe);
 
 	pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
@@ -3426,7 +3426,17 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 		}
 	}
 
-	pnv_ioda_free_pe(pe);
+	/*
+	 * The PE for root bus can be removed because of hotplug in EEH
+	 * recovery for fenced PHB error. We need to mark the PE dead so
+	 * that it can be populated again in PCI hot add path. The PE
+	 * shouldn't be destroyed as it's the global reserved resource.
+	 */
+	if (phb->ioda.root_pe_populated &&
+	    phb->ioda.root_pe_idx == pe->pe_number)
+		phb->ioda.root_pe_populated = false;
+	else
+		pnv_ioda_free_pe(pe);
 }
 
 static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3442,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
 		return;
 
+	/*
+	 * PCI hotplug can happen as part of EEH error recovery. The @pdn
+	 * isn't removed and added afterwards in this scenario. We should
+	 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
+	 * device count is decreased on removing devices while failing to
+	 * be increased on adding devices. It leads to unbalanced PE's device
+	 * count and eventually make normal PCI hotplug path broken.
+	 */
 	pe = &phb->ioda.pe_array[pdn->pe_number];
+	pdn->pe_number = IODA_INVALID_PE;
+
 	WARN_ON(--pe->device_count < 0);
 	if (pe->device_count == 0)
 		pnv_ioda_release_pe(pe);


@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
 	__chk_user_ptr(ptr);					\
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
-		unsigned char __x;				\
+		unsigned char __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
-		unsigned short __x;				\
+		unsigned short __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
-		unsigned int __x;				\
+		unsigned int __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
-		unsigned long long __x;				\
+		unsigned long long __x = 0;			\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\


@@ -2231,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	current->thread.fpu.fpc = fpu->fpc;
 	if (MACHINE_HAS_VX)
-		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+				 (freg_t *) fpu->fprs);
 	else
-		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	return 0;
 }
@@ -2242,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	/* make sure we have the latest values */
 	save_fpu_regs();
 	if (MACHINE_HAS_VX)
-		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp((freg_t *) fpu->fprs,
+				 (__vector128 *) vcpu->run->s.regs.vrs);
 	else
-		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
 	fpu->fpc = current->thread.fpu.fpc;
 	return 0;
 }


@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		/* Validity 0x0044 will be checked by SIE */
 		if (rc)
 			goto unpin;
-		scb_s->gvrd = hpa;
+		scb_s->riccbd = hpa;
 	}
 	return 0;
 unpin:


@ -163,7 +163,7 @@ do { \
__get_user_asm(val, "lw", ptr); \ __get_user_asm(val, "lw", ptr); \
break; \ break; \
case 8: \ case 8: \
if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ if (__copy_from_user((void *)&val, ptr, 8) == 0) \
__gu_err = 0; \ __gu_err = 0; \
else \ else \
__gu_err = -EFAULT; \ __gu_err = -EFAULT; \
@ -188,6 +188,8 @@ do { \
\ \
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \ __get_user_common((x), size, __gu_ptr); \
else \
(x) = 0; \
\ \
__gu_err; \ __gu_err; \
}) })
@ -201,6 +203,7 @@ do { \
"2:\n" \ "2:\n" \
".section .fixup,\"ax\"\n" \ ".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \ "3:li %0, %4\n" \
"li %1, 0\n" \
"j 2b\n" \ "j 2b\n" \
".previous\n" \ ".previous\n" \
".section __ex_table,\"a\"\n" \ ".section __ex_table,\"a\"\n" \
@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len) copy_from_user(void *to, const void *from, unsigned long len)
{ {
unsigned long over; unsigned long res = len;
if (access_ok(VERIFY_READ, from, len)) if (likely(access_ok(VERIFY_READ, from, len)))
return __copy_tofrom_user(to, from, len); res = __copy_tofrom_user(to, from, len);
if ((unsigned long)from < TASK_SIZE) { if (unlikely(res))
over = (unsigned long)from + len - TASK_SIZE; memset(to + (len - res), 0, res);
return __copy_tofrom_user(to, from, len - over) + over;
} return res;
return len;
} }
static inline unsigned long static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len) copy_to_user(void *to, const void *from, unsigned long len)
{ {
unsigned long over; if (likely(access_ok(VERIFY_WRITE, to, len)))
len = __copy_tofrom_user(to, from, len);
if (access_ok(VERIFY_WRITE, to, len))
return __copy_tofrom_user(to, from, len);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len; return len;
} }
#define __copy_from_user(to, from, len) \ static inline unsigned long
__copy_tofrom_user((to), (from), (len)) __copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long left = __copy_tofrom_user(to, from, len);
if (unlikely(left))
memset(to + (len - left), 0, left);
return left;
}
#define __copy_to_user(to, from, len) \ #define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len)) __copy_tofrom_user((to), (from), (len))
@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
static inline unsigned long static inline unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long len) __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
{ {
return __copy_from_user(to, from, len); return __copy_tofrom_user(to, from, len);
} }
#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) #define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
static inline unsigned long static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len) copy_in_user(void *to, const void *from, unsigned long len)
{ {
if (access_ok(VERIFY_READ, from, len) && if (access_ok(VERIFY_READ, from, len) &&
access_ok(VERIFY_WRITE, to, len)) access_ok(VERIFY_WRITE, to, len))
return copy_from_user(to, from, len); return __copy_tofrom_user(to, from, len);
} }
/* /*
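The rewritten copy_from_user() above replaces the old TASK_SIZE clamping with a simpler contract: whatever the raw copy did not transfer is zero-filled, so the destination buffer never holds uninitialized kernel memory. A user-space sketch of that contract, with copy_in() and the simulated shortfall "res" being hypothetical:

        #include <string.h>

        /* "res" bytes at the tail were not copied, so they are cleared
         * rather than left uninitialized; the return value stays the
         * number of bytes NOT copied, as the kernel ABI requires. */
        static unsigned long copy_in(void *to, const void *from,
                                     unsigned long len, unsigned long res)
        {
                memcpy(to, from, len - res);    /* the part that "succeeded" */
                if (res)                        /* unlikely() in the kernel */
                        memset((char *)to + (len - res), 0, res);
                return res;
        }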

View File

@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
__kernel_size_t __copy_size = (__kernel_size_t) n; __kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_from, __copy_size)) if (__copy_size && __access_ok(__copy_from, __copy_size))
return __copy_user(to, from, __copy_size); __copy_size = __copy_user(to, from, __copy_size);
if (unlikely(__copy_size))
memset(to + (n - __copy_size), 0, __copy_size);
return __copy_size; return __copy_size;
} }

View File

@ -24,6 +24,7 @@
#define __get_user_size(x,ptr,size,retval) \ #define __get_user_size(x,ptr,size,retval) \
do { \ do { \
retval = 0; \ retval = 0; \
x = 0; \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
retval = __get_user_asm_b((void *)&x, \ retval = __get_user_asm_b((void *)&x, \

View File

@ -266,8 +266,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
if (n && __access_ok((unsigned long) from, n)) { if (n && __access_ok((unsigned long) from, n)) {
check_object_size(to, n, false); check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n); return __copy_user((__force void __user *) to, from, n);
} else } else {
memset(to, 0, n);
return n; return n;
}
} }
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)

View File

@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status; return status;
} }
struct exit_boot_struct {
struct boot_params *boot_params;
struct efi_info *efi;
struct setup_data *e820ext;
__u32 e820ext_size;
bool is64;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
static bool first = true;
const char *signature;
__u32 nr_desc;
efi_status_t status;
struct exit_boot_struct *p = priv;
if (first) {
nr_desc = *map->buff_size / *map->desc_size;
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
u32 nr_e820ext = nr_desc -
ARRAY_SIZE(p->boot_params->e820_map);
status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);
if (status != EFI_SUCCESS)
return status;
}
first = false;
}
signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
p->efi->efi_systab = (unsigned long)sys_table_arg;
p->efi->efi_memdesc_size = *map->desc_size;
p->efi->efi_memdesc_version = *map->desc_ver;
p->efi->efi_memmap = (unsigned long)*map->map;
p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
return EFI_SUCCESS;
}
static efi_status_t exit_boot(struct boot_params *boot_params, static efi_status_t exit_boot(struct boot_params *boot_params,
void *handle, bool is64) void *handle, bool is64)
{ {
struct efi_info *efi = &boot_params->efi_info; unsigned long map_sz, key, desc_size, buff_size;
unsigned long map_sz, key, desc_size;
efi_memory_desc_t *mem_map; efi_memory_desc_t *mem_map;
struct setup_data *e820ext; struct setup_data *e820ext;
const char *signature;
__u32 e820ext_size; __u32 e820ext_size;
__u32 nr_desc, prev_nr_desc;
efi_status_t status; efi_status_t status;
__u32 desc_version; __u32 desc_version;
bool called_exit = false; struct efi_boot_memmap map;
u8 nr_entries; struct exit_boot_struct priv;
int i;
nr_desc = 0; map.map = &mem_map;
e820ext = NULL; map.map_size = &map_sz;
e820ext_size = 0; map.desc_size = &desc_size;
map.desc_ver = &desc_version;
get_map: map.key_ptr = &key;
status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size, map.buff_size = &buff_size;
&desc_version, &key); priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
priv.e820ext = NULL;
priv.e820ext_size = 0;
priv.is64 = is64;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
return status; return status;
prev_nr_desc = nr_desc; e820ext = priv.e820ext;
nr_desc = map_sz / desc_size; e820ext_size = priv.e820ext_size;
if (nr_desc > prev_nr_desc &&
nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
if (status != EFI_SUCCESS)
goto free_mem_map;
efi_call_early(free_pool, mem_map);
goto get_map; /* Allocated memory, get map again */
}
signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
efi->efi_systab = (unsigned long)sys_table;
efi->efi_memdesc_size = desc_size;
efi->efi_memdesc_version = desc_version;
efi->efi_memmap = (unsigned long)mem_map;
efi->efi_memmap_size = map_sz;
#ifdef CONFIG_X86_64
efi->efi_systab_hi = (unsigned long)sys_table >> 32;
efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
#endif
/* Might as well exit boot services now */
status = efi_call_early(exit_boot_services, handle, key);
if (status != EFI_SUCCESS) {
/*
* ExitBootServices() will fail if any of the event
* handlers change the memory map. In which case, we
* must be prepared to retry, but only once so that
* we're guaranteed to exit on repeated failures instead
* of spinning forever.
*/
if (called_exit)
goto free_mem_map;
called_exit = true;
efi_call_early(free_pool, mem_map);
goto get_map;
}
/* Historic? */ /* Historic? */
boot_params->alt_mem_k = 32 * 1024; boot_params->alt_mem_k = 32 * 1024;
@ -1085,10 +1093,6 @@ get_map:
return status; return status;
return EFI_SUCCESS; return EFI_SUCCESS;
free_mem_map:
efi_call_early(free_pool, mem_map);
return status;
} }
/* /*
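The refactor above moves all memory-map post-processing into exit_boot_func() and hands it to efi_exit_boot_services() together with a priv cookie. Because ExitBootServices() may be retried, the callback can legitimately run twice, which is why the one-shot e820ext allocation is guarded by a static flag. A compressed user-space sketch of that shape, where exit_hook(), exit_ctx, and the size are hypothetical:

        struct exit_ctx {
                int e820ext_size;
        };

        /* The hook may run twice (ExitBootServices retry), so one-shot
         * work is guarded, here with a static flag as in the patch. */
        static int exit_hook(void *priv)
        {
                static int first = 1;
                struct exit_ctx *p = priv;

                if (first) {                    /* allocate extension only once */
                        p->e820ext_size = 128;  /* hypothetical size */
                        first = 0;
                }
                return 0;                       /* EFI_SUCCESS */
        }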

View File

@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{ {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0081, [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
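The new values follow the AMD PerfEvtSel layout, packing the unit mask into bits 15:8 above the event select in bits 7:0, so 0x077d requests event 0x7d with umask 0x07 instead of the plain 0x0080 event. A sketch of the encoding, with AMD_EVT() as a hypothetical helper:

        #include <stdio.h>

        /* Pack (umask << 8) | event_select, as the table entries do. */
        #define AMD_EVT(event, umask)   (((umask) << 8) | (event))

        int main(void)
        {
                printf("cache refs: 0x%04x\n", AMD_EVT(0x7d, 0x07)); /* 0x077d */
                printf("cache miss: 0x%04x\n", AMD_EVT(0x7e, 0x07)); /* 0x077e */
                return 0;
        }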

View File

@ -29,6 +29,8 @@
#define COUNTER_SHIFT 16 #define COUNTER_SHIFT 16
static HLIST_HEAD(uncore_unused_list);
struct amd_uncore { struct amd_uncore {
int id; int id;
int refcnt; int refcnt;
@ -39,7 +41,7 @@ struct amd_uncore {
cpumask_t *active_mask; cpumask_t *active_mask;
struct pmu *pmu; struct pmu *pmu;
struct perf_event *events[MAX_COUNTERS]; struct perf_event *events[MAX_COUNTERS];
struct amd_uncore *free_when_cpu_online; struct hlist_node node;
}; };
static struct amd_uncore * __percpu *amd_uncore_nb; static struct amd_uncore * __percpu *amd_uncore_nb;
@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL; uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask; uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu; uncore_nb->pmu = &amd_nb_pmu;
uncore_nb->id = -1;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
} }
@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL; uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_l2->active_mask = &amd_l2_active_mask; uncore_l2->active_mask = &amd_l2_active_mask;
uncore_l2->pmu = &amd_l2_pmu; uncore_l2->pmu = &amd_l2_pmu;
uncore_l2->id = -1;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
} }
@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
continue; continue;
if (this->id == that->id) { if (this->id == that->id) {
that->free_when_cpu_online = this; hlist_add_head(&this->node, &uncore_unused_list);
this = that; this = that;
break; break;
} }
@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
return 0; return 0;
} }
static void uncore_clean_online(void)
{
struct amd_uncore *uncore;
struct hlist_node *n;
hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
hlist_del(&uncore->node);
kfree(uncore);
}
}
static void uncore_online(unsigned int cpu, static void uncore_online(unsigned int cpu,
struct amd_uncore * __percpu *uncores) struct amd_uncore * __percpu *uncores)
{ {
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
kfree(uncore->free_when_cpu_online); uncore_clean_online();
uncore->free_when_cpu_online = NULL;
if (cpu == uncore->cpu) if (cpu == uncore->cpu)
cpumask_set_cpu(cpu, uncore->active_mask); cpumask_set_cpu(cpu, uncore->active_mask);
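Instead of each uncore remembering a single peer in free_when_cpu_online, duplicates are now parked on uncore_unused_list and reaped in one pass; the _safe list walk is what allows entries to be freed mid-iteration. The same idea on a bare singly-linked list, where struct node and clean() are hypothetical simplifications:

        #include <stdlib.h>

        struct node {
                struct node *next;
        };

        /* Analogue of uncore_clean_online(): grab the successor before
         * freeing so the walk survives the deletion, as the _safe
         * iterator does in the kernel. */
        static void clean(struct node **head)
        {
                struct node *n = *head, *next;

                while (n) {
                        next = n->next;
                        free(n);
                        n = next;
                }
                *head = NULL;
        }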

View File

@ -31,7 +31,17 @@
struct bts_ctx { struct bts_ctx {
struct perf_output_handle handle; struct perf_output_handle handle;
struct debug_store ds_back; struct debug_store ds_back;
int started; int state;
};
/* BTS context states: */
enum {
/* no ongoing AUX transactions */
BTS_STATE_STOPPED = 0,
/* AUX transaction is on, BTS tracing is disabled */
BTS_STATE_INACTIVE,
/* AUX transaction is on, BTS tracing is running */
BTS_STATE_ACTIVE,
}; };
static DEFINE_PER_CPU(struct bts_ctx, bts_ctx); static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
static int static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle); bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
/*
* Ordering PMU callbacks wrt themselves and the PMI is done by means
* of bts::state, which:
* - is set when bts::handle::event is valid, that is, between
* perf_aux_output_begin() and perf_aux_output_end();
* - is zero otherwise;
* - is ordered against bts::handle::event with a compiler barrier.
*/
static void __bts_event_start(struct perf_event *event) static void __bts_event_start(struct perf_event *event)
{ {
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx); struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
/* /*
* local barrier to make sure that ds configuration made it * local barrier to make sure that ds configuration made it
* before we enable BTS * before we enable BTS and bts::state goes ACTIVE
*/ */
wmb(); wmb();
/* INACTIVE/STOPPED -> ACTIVE */
WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
intel_pmu_enable_bts(config); intel_pmu_enable_bts(config);
} }
@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
__bts_event_start(event); __bts_event_start(event);
/* PMI handler: this counter is running and likely generating PMIs */
ACCESS_ONCE(bts->started) = 1;
return; return;
fail_end_stop: fail_end_stop:
@ -263,30 +282,34 @@ fail_stop:
event->hw.state = PERF_HES_STOPPED; event->hw.state = PERF_HES_STOPPED;
} }
static void __bts_event_stop(struct perf_event *event) static void __bts_event_stop(struct perf_event *event, int state)
{ {
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
WRITE_ONCE(bts->state, state);
/* /*
* No extra synchronization is mandated by the documentation to have * No extra synchronization is mandated by the documentation to have
* BTS data stores globally visible. * BTS data stores globally visible.
*/ */
intel_pmu_disable_bts(); intel_pmu_disable_bts();
if (event->hw.state & PERF_HES_STOPPED)
return;
ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
} }
static void bts_event_stop(struct perf_event *event, int flags) static void bts_event_stop(struct perf_event *event, int flags)
{ {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx); struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
struct bts_buffer *buf = perf_get_aux(&bts->handle); struct bts_buffer *buf = NULL;
int state = READ_ONCE(bts->state);
/* PMI handler: don't restart this counter */ if (state == BTS_STATE_ACTIVE)
ACCESS_ONCE(bts->started) = 0; __bts_event_stop(event, BTS_STATE_STOPPED);
__bts_event_stop(event); if (state != BTS_STATE_STOPPED)
buf = perf_get_aux(&bts->handle);
event->hw.state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) { if (flags & PERF_EF_UPDATE) {
bts_update(bts); bts_update(bts);
@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
bts->handle.head = bts->handle.head =
local_xchg(&buf->data_size, local_xchg(&buf->data_size,
buf->nr_pages << PAGE_SHIFT); buf->nr_pages << PAGE_SHIFT);
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0), perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0)); !!local_xchg(&buf->lost, 0));
} }
@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void) void intel_bts_enable_local(void)
{ {
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx); struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
int state = READ_ONCE(bts->state);
if (bts->handle.event && bts->started) /*
* Here we transition from INACTIVE to ACTIVE;
* if we instead are STOPPED from the interrupt handler,
* stay that way. Can't be ACTIVE here though.
*/
if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
return;
if (state == BTS_STATE_STOPPED)
return;
if (bts->handle.event)
__bts_event_start(bts->handle.event); __bts_event_start(bts->handle.event);
} }
@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
{ {
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx); struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
/*
* Here we transition from ACTIVE to INACTIVE;
* do nothing for STOPPED or INACTIVE.
*/
if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
return;
if (bts->handle.event) if (bts->handle.event)
__bts_event_stop(bts->handle.event); __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
} }
static int static int
@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
return 0; return 0;
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
if (WARN_ON_ONCE(head != local_read(&buf->head)))
return -EINVAL;
phys = &buf->buf[buf->cur_buf]; phys = &buf->buf[buf->cur_buf];
space = phys->offset + phys->displacement + phys->size - head; space = phys->offset + phys->displacement + phys->size - head;
@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void) int intel_bts_interrupt(void)
{ {
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx); struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
struct perf_event *event = bts->handle.event; struct perf_event *event = bts->handle.event;
struct bts_buffer *buf; struct bts_buffer *buf;
s64 old_head; s64 old_head;
int err; int err = -ENOSPC, handled = 0;
if (!event || !bts->started) /*
return 0; * The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
*/
if (ds->bts_index >= ds->bts_interrupt_threshold)
handled = 1;
/*
* this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
* so we can only be INACTIVE or STOPPED
*/
if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
return handled;
buf = perf_get_aux(&bts->handle); buf = perf_get_aux(&bts->handle);
if (!buf)
return handled;
/* /*
* Skip snapshot counters: they don't use the interrupt, but * Skip snapshot counters: they don't use the interrupt, but
* there's no other way of telling, because the pointer will * there's no other way of telling, because the pointer will
* keep moving * keep moving
*/ */
if (!buf || buf->snapshot) if (buf->snapshot)
return 0; return 0;
old_head = local_read(&buf->head); old_head = local_read(&buf->head);
@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
/* no new data */ /* no new data */
if (old_head == local_read(&buf->head)) if (old_head == local_read(&buf->head))
return 0; return handled;
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0), perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0)); !!local_xchg(&buf->lost, 0));
buf = perf_aux_output_begin(&bts->handle, event); buf = perf_aux_output_begin(&bts->handle, event);
if (!buf) if (buf)
return 1; err = bts_buffer_reset(buf, &bts->handle);
err = bts_buffer_reset(buf, &bts->handle); if (err) {
if (err) WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
perf_aux_output_end(&bts->handle, 0, false);
if (buf) {
/*
* BTS_STATE_STOPPED should be visible before
* cleared handle::event
*/
barrier();
perf_aux_output_end(&bts->handle, 0, false);
}
}
return 1; return 1;
} }
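The old started flag becomes a three-state machine so the PMI handler, pmu::stop(), and the enable/disable_local() pair can order themselves purely through bts::state: only INACTIVE may be restarted, so an event stopped from NMI context can never be resurrected. A user-space sketch of that transition rule, with C11 atomics standing in for READ_ONCE()/WRITE_ONCE():

        #include <stdatomic.h>

        enum bts_state { STOPPED, INACTIVE, ACTIVE };

        static _Atomic int state = STOPPED;

        /* Mirror of intel_bts_enable_local(): ACTIVE here is a bug,
         * STOPPED stays stopped, only INACTIVE is restarted. */
        static void enable_local(void)
        {
                int s = atomic_load(&state);    /* READ_ONCE() in the kernel */

                if (s == ACTIVE || s == STOPPED)
                        return;
                atomic_store(&state, ACTIVE);   /* INACTIVE -> ACTIVE */
        }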

View File

@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
* disabled state if called consecutively. * disabled state if called consecutively.
* *
* During consecutive calls, the same disable value will be written to related * During consecutive calls, the same disable value will be written to related
* registers, so the PMU state remains unchanged. hw.state in * registers, so the PMU state remains unchanged.
* intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive *
* calls. * intel_bts events don't coexist with intel PMU's BTS events because of
* x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
* disabled around intel PMU's event batching etc, only inside the PMI handler.
*/ */
static void __intel_pmu_disable_all(void) static void __intel_pmu_disable_all(void)
{ {
@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts(); intel_pmu_disable_bts();
else
intel_bts_disable_local();
intel_pmu_pebs_disable_all(); intel_pmu_pebs_disable_all();
} }
@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
return; return;
intel_pmu_enable_bts(event->hw.config); intel_pmu_enable_bts(event->hw.config);
} else }
intel_bts_enable_local();
} }
static void intel_pmu_enable_all(int added) static void intel_pmu_enable_all(int added)
@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/ */
if (!x86_pmu.late_ack) if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_bts_disable_local();
__intel_pmu_disable_all(); __intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer(); handled = intel_pmu_drain_bts_buffer();
handled += intel_bts_interrupt(); handled += intel_bts_interrupt();
@ -2172,6 +2172,7 @@ done:
/* Only restore PMU state when it's active. See x86_pmu_disable(). */ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
if (cpuc->enabled) if (cpuc->enabled)
__intel_pmu_enable_all(0, true); __intel_pmu_enable_all(0, true);
intel_bts_enable_local();
/* /*
* Only unmask the NMI after the overflow counters * Only unmask the NMI after the overflow counters

View File

@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
static void init_mbm_sample(u32 rmid, u32 evt_type); static void init_mbm_sample(u32 rmid, u32 evt_type);
static void __intel_mbm_event_count(void *info); static void __intel_mbm_event_count(void *info);
static bool is_cqm_event(int e)
{
return (e == QOS_L3_OCCUP_EVENT_ID);
}
static bool is_mbm_event(int e) static bool is_mbm_event(int e)
{ {
return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID); return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
(event->attr.config > QOS_MBM_LOCAL_EVENT_ID)) (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
return -EINVAL; return -EINVAL;
if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
(is_mbm_event(event->attr.config) && !mbm_enabled))
return -EINVAL;
/* unsupported modes and filters */ /* unsupported modes and filters */
if (event->attr.exclude_user || if (event->attr.exclude_user ||
event->attr.exclude_kernel || event->attr.exclude_kernel ||

View File

@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct pebs_record_nhm *p = at; struct pebs_record_nhm *p = at;
u64 pebs_status; u64 pebs_status;
/* PEBS v3 has accurate status bits */ pebs_status = p->status & cpuc->pebs_enabled;
pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) { if (x86_pmu.intel_cap.pebs_format >= 3) {
for_each_set_bit(bit, (unsigned long *)&p->status, for_each_set_bit(bit, (unsigned long *)&pebs_status,
MAX_PEBS_EVENTS) x86_pmu.max_pebs_events)
counts[bit]++; counts[bit]++;
continue; continue;
} }
pebs_status = p->status & cpuc->pebs_enabled;
pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
/* /*
* On some CPUs the PEBS status can be zero when PEBS is * On some CPUs the PEBS status can be zero when PEBS is
* racing with clearing of GLOBAL_STATUS. * racing with clearing of GLOBAL_STATUS.
@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue; continue;
event = cpuc->events[bit]; event = cpuc->events[bit];
WARN_ON_ONCE(!event); if (WARN_ON_ONCE(!event))
WARN_ON_ONCE(!event->attr.precise_ip); continue;
if (WARN_ON_ONCE(!event->attr.precise_ip))
continue;
/* log dropped samples number */ /* log dropped samples number */
if (error[bit]) if (error[bit])
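The drain loop now masks p->status before walking it for every PEBS format, so a stray status bit above max_pebs_events can no longer index past cpuc->events[]; the WARN_ON_ONCE sites likewise skip the record instead of dereferencing a NULL event. The masking step in isolation, with pebs_mask() as a hypothetical extraction:

        /* Only bits for counters that are both present and enabled
         * survive; stray high bits are dropped before the bit walk. */
        static unsigned long long pebs_mask(unsigned long long status,
                                            unsigned long long enabled,
                                            int max_events)
        {
                status &= enabled;
                status &= (1ULL << max_events) - 1;
                return status;
        }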

View File

@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
event->hw.addr_filters = NULL; event->hw.addr_filters = NULL;
} }
static inline bool valid_kernel_ip(unsigned long ip)
{
return virt_addr_valid(ip) && kernel_ip(ip);
}
static int pt_event_addr_filters_validate(struct list_head *filters) static int pt_event_addr_filters_validate(struct list_head *filters)
{ {
struct perf_addr_filter *filter; struct perf_addr_filter *filter;
@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
list_for_each_entry(filter, filters, entry) { list_for_each_entry(filter, filters, entry) {
/* PT doesn't support single address triggers */ /* PT doesn't support single address triggers */
if (!filter->range) if (!filter->range || !filter->size)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!filter->inode && !kernel_ip(filter->offset)) if (!filter->inode) {
return -EINVAL; if (!valid_kernel_ip(filter->offset))
return -EINVAL;
if (!valid_kernel_ip(filter->offset + filter->size))
return -EINVAL;
}
if (++range > pt_cap_get(PT_CAP_num_address_ranges)) if (++range > pt_cap_get(PT_CAP_num_address_ranges))
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
} else { } else {
/* apply the offset */ /* apply the offset */
msr_a = filter->offset + offs[range]; msr_a = filter->offset + offs[range];
msr_b = filter->size + msr_a; msr_b = filter->size + msr_a - 1;
} }
filters->filter[range].msr_a = msr_a; filters->filter[range].msr_a = msr_a;
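Two fixes are folded together here: a filter must have a nonzero size with both ends resolving to valid kernel addresses, and the PT address-range MSRs take an inclusive end, so msr_b is base + size - 1. A sketch of the inclusive-bound arithmetic, with filter_bounds() as a hypothetical helper:

        #include <stdio.h>

        /* [msr_a, msr_b] is inclusive: a "size"-byte filter starting at
         * "offset" ends at offset + size - 1, not offset + size. */
        static void filter_bounds(unsigned long offset, unsigned long size,
                                  unsigned long *msr_a, unsigned long *msr_b)
        {
                *msr_a = offset;
                *msr_b = offset + size - 1;
        }

        int main(void)
        {
                unsigned long a, b;

                filter_bounds(0x1000, 0x1000, &a, &b);
                printf("[%#lx, %#lx]\n", a, b);  /* [0x1000, 0x1fff] */
                return 0;
        }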

View File

@ -433,7 +433,11 @@ do { \
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \ "2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \ ".section .fixup,\"ax\"\n" \
"3:xor"itype" %"rtype"0,%"rtype"0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_EX(1b, 3b) \
: ltype(x) : "m" (__m(addr))) : ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \ #define __put_user_nocheck(x, ptr, size) \

View File

@ -2093,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
return -EINVAL; return -EINVAL;
} }
num_processors++;
if (apicid == boot_cpu_physical_apicid) { if (apicid == boot_cpu_physical_apicid) {
/* /*
* x86_bios_cpu_apicid is required to have processors listed * x86_bios_cpu_apicid is required to have processors listed
@ -2116,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n", pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
thiscpu, apicid); thiscpu, apicid);
disabled_cpus++; disabled_cpus++;
return -ENOSPC; return -ENOSPC;
} }
num_processors++;
/* /*
* Validate version * Validate version
*/ */
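Moving num_processors++ below the reject paths means a CPU that is ignored for exceeding the apic or package limit no longer inflates the count. The control flow in miniature, where register_cpu() is hypothetical and -28 stands in for -ENOSPC:

        /* Count only after every reject path has been passed; the old
         * order bumped the counter and then bailed out. */
        static int register_cpu(int *num_processors, int thiscpu, int limit)
        {
                if (thiscpu >= limit)
                        return -28;             /* CPU is ignored */
                (*num_processors)++;            /* counted only on success */
                return 0;
        }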

View File

@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
*/ */
static u8 *container; static u8 *container;
static size_t container_size; static size_t container_size;
static bool ucode_builtin;
static u32 ucode_new_rev; static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE]; static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
void __init load_ucode_amd_bsp(unsigned int family) void __init load_ucode_amd_bsp(unsigned int family)
{ {
struct cpio_data cp; struct cpio_data cp;
bool *builtin;
void **data; void **data;
size_t *size; size_t *size;
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
data = (void **)__pa_nodebug(&ucode_cpio.data); data = (void **)__pa_nodebug(&ucode_cpio.data);
size = (size_t *)__pa_nodebug(&ucode_cpio.size); size = (size_t *)__pa_nodebug(&ucode_cpio.size);
builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else #else
data = &ucode_cpio.data; data = &ucode_cpio.data;
size = &ucode_cpio.size; size = &ucode_cpio.size;
builtin = &ucode_builtin;
#endif #endif
if (!load_builtin_amd_microcode(&cp, family)) *builtin = load_builtin_amd_microcode(&cp, family);
if (!*builtin)
cp = find_ucode_in_initrd(); cp = find_ucode_in_initrd();
if (!(cp.data && cp.size)) if (!(cp.data && cp.size))
@ -373,7 +378,8 @@ void load_ucode_amd_ap(void)
return; return;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
cont += PAGE_OFFSET - __PAGE_OFFSET_BASE; if (!ucode_builtin)
cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001); eax = cpuid_eax(0x00000001);
eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ); eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
@ -439,7 +445,8 @@ int __init save_microcode_in_initrd_amd(void)
container = cont_va; container = cont_va;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */ /* Add CONFIG_RANDOMIZE_MEMORY offset. */
container += PAGE_OFFSET - __PAGE_OFFSET_BASE; if (!ucode_builtin)
container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001); eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

View File

@ -289,6 +289,7 @@ void __init kvmclock_init(void)
put_cpu(); put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz; x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock; x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock; x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC

View File

@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{ {
bool new_val, old_val; bool new_val, old_val;
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
union kvm_ioapic_redirect_entry *e; union kvm_ioapic_redirect_entry *e;
e = &ioapic->redirtbl[RTC_GSI]; e = &ioapic->redirtbl[RTC_GSI];
@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
return; return;
new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); old_val = test_bit(vcpu->vcpu_id, dest_map->map);
if (new_val == old_val) if (new_val == old_val)
return; return;
if (new_val) { if (new_val) {
__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); __set_bit(vcpu->vcpu_id, dest_map->map);
dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
ioapic->rtc_status.pending_eoi++; ioapic->rtc_status.pending_eoi++;
} else { } else {
__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); __clear_bit(vcpu->vcpu_id, dest_map->map);
ioapic->rtc_status.pending_eoi--; ioapic->rtc_status.pending_eoi--;
rtc_status_pending_eoi_check_valid(ioapic); rtc_status_pending_eoi_check_valid(ioapic);
} }

View File

@ -23,8 +23,8 @@
static struct kvm_event_hw_type_mapping amd_event_mapping[] = { static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES }, [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES }, [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },

View File

@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (tsc_delta < 0) if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC"); mark_tsc_unstable("KVM discovered backwards TSC");
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
kvm_get_lapic_tscdeadline_msr(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) { if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu, u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc); vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1; vcpu->arch.tsc_catchup = 1;
} }
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
kvm_get_lapic_tscdeadline_msr(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
/* /*
* On a host with synchronized TSC, there is no need to update * On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration * kvmclock on vcpu->cpu migration

View File

@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
/* /*
* Broadwell EP Home Agent BARs erroneously return non-zero values when read. * Device [8086:2fc0]
* Erratum HSE43
* CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
* *
* See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html * Devices [8086:6f60,6fa0,6fc0]
* entry BDF2. * Erratum BDF2
* PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
*/ */
static void pci_bdwep_bar(struct pci_dev *dev) static void pci_invalid_bar(struct pci_dev *dev)
{ {
dev->non_compliant_bars = 1; dev->non_compliant_bars = 1;
} }
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);

View File

@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
static int cryptd_hash_import(struct ahash_request *req, const void *in) static int cryptd_hash_import(struct ahash_request *req, const void *in)
{ {
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(req);
return crypto_shash_import(&rctx->desc, in); desc->tfm = ctx->child;
desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
} }
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
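cryptd_hash_import() now targets the shash descriptor embedded in the request context, which arrives uninitialized, so its backing transform and flags must be set before crypto_shash_import() touches it. The shape of the fix in a user-space sketch, where struct desc and hash_import() are hypothetical stand-ins:

        struct desc {
                void *tfm;
                unsigned int flags;
        };

        /* Initialize the descriptor's transform and flags before handing
         * it opaque exported state; both were left unset before the fix. */
        static int hash_import(struct desc *desc, void *child,
                               unsigned int flags, const void *in)
        {
                desc->tfm = child;
                desc->flags = flags;
                (void)in;       /* real code then calls crypto_shash_import() */
                return 0;
        }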

View File

@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL; struct device *parent = NULL;
int retval; int retval;
trace_rpm_suspend(dev, rpmflags); trace_rpm_suspend_rcuidle(dev, rpmflags);
repeat: repeat:
retval = rpm_check_suspend_allowed(dev); retval = rpm_check_suspend_allowed(dev);
@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
} }
out: out:
trace_rpm_return_int(dev, _THIS_IP_, retval); trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval; return retval;

View File

@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_I2S1] = { 0x2d0, BIT(13) }, [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
[RST_BUS_I2S2] = { 0x2d0, BIT(14) }, [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
[RST_BUS_I2C0] = { 0x2d4, BIT(0) }, [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
[RST_BUS_I2C1] = { 0x2d4, BIT(1) }, [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
[RST_BUS_I2C2] = { 0x2d4, BIT(2) }, [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
[RST_BUS_UART0] = { 0x2d4, BIT(16) }, [RST_BUS_UART0] = { 0x2d8, BIT(16) },
[RST_BUS_UART1] = { 0x2d4, BIT(17) }, [RST_BUS_UART1] = { 0x2d8, BIT(17) },
[RST_BUS_UART2] = { 0x2d4, BIT(18) }, [RST_BUS_UART2] = { 0x2d8, BIT(18) },
[RST_BUS_UART3] = { 0x2d4, BIT(19) }, [RST_BUS_UART3] = { 0x2d8, BIT(19) },
[RST_BUS_SCR] = { 0x2d4, BIT(20) }, [RST_BUS_SCR] = { 0x2d8, BIT(20) },
}; };
static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {

View File

@ -14,9 +14,9 @@
#include "ccu_gate.h" #include "ccu_gate.h"
#include "ccu_nk.h" #include "ccu_nk.h"
void ccu_nk_find_best(unsigned long parent, unsigned long rate, static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
unsigned int max_n, unsigned int max_k, unsigned int max_n, unsigned int max_k,
unsigned int *n, unsigned int *k) unsigned int *n, unsigned int *k)
{ {
unsigned long best_rate = 0; unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0; unsigned int best_k = 0, best_n = 0;

View File

@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
SUN4I_PLL2_PRE_DIV_WIDTH, SUN4I_PLL2_PRE_DIV_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock); &sun4i_a10_pll2_lock);
if (!prediv_clk) { if (IS_ERR(prediv_clk)) {
pr_err("Couldn't register the prediv clock\n"); pr_err("Couldn't register the prediv clock\n");
goto err_free_array; goto err_free_array;
} }
@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
&mult->hw, &clk_multiplier_ops, &mult->hw, &clk_multiplier_ops,
&gate->hw, &clk_gate_ops, &gate->hw, &clk_gate_ops,
CLK_SET_RATE_PARENT); CLK_SET_RATE_PARENT);
if (!base_clk) { if (IS_ERR(base_clk)) {
pr_err("Couldn't register the base multiplier clock\n"); pr_err("Couldn't register the base multiplier clock\n");
goto err_free_multiplier; goto err_free_multiplier;
} }
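These clock registration helpers report failure as an ERR_PTR() encoding, never as NULL, so the old !ptr tests could never fire; IS_ERR() is the correct probe. The kernel convention in miniature, following the err.h definitions:

        /* Errors are small negative values cast to pointers, living in
         * the top page of the address space, so a NULL test misses them. */
        #define MAX_ERRNO       4095

        static inline void *ERR_PTR(long error)
        {
                return (void *)error;
        }

        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }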

View File

@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
return; return;
reg = of_io_request_and_map(node, 0, of_node_full_name(node)); reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (!reg) { if (IS_ERR(reg)) {
pr_err("Could not get registers for sun8i-mbus-clk\n"); pr_err("Could not get registers for sun8i-mbus-clk\n");
goto err_free_parents; goto err_free_parents;
} }

View File

@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
} }
if (subnode) { if (subnode) {
node = of_get_flat_dt_subnode_by_name(node, subnode); int err = of_get_flat_dt_subnode_by_name(node, subnode);
if (node < 0)
if (err < 0)
return 0; return 0;
node = err;
} }
return __find_uefi_params(node, info, dt_params[i].params); return __find_uefi_params(node, info, dt_params[i].params);

View File

@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif #endif
#define EFI_MMAP_NR_SLACK_SLOTS 8
struct file_info { struct file_info {
efi_file_handle_t *handle; efi_file_handle_t *handle;
u64 size; u64 size;
@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
} }
} }
static inline bool mmap_has_headroom(unsigned long buff_size,
unsigned long map_size,
unsigned long desc_size)
{
unsigned long slack = buff_size - map_size;
return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
efi_memory_desc_t **map, struct efi_boot_memmap *map)
unsigned long *map_size,
unsigned long *desc_size,
u32 *desc_ver,
unsigned long *key_ptr)
{ {
efi_memory_desc_t *m = NULL; efi_memory_desc_t *m = NULL;
efi_status_t status; efi_status_t status;
unsigned long key; unsigned long key;
u32 desc_version; u32 desc_version;
*map_size = sizeof(*m) * 32; *map->desc_size = sizeof(*m);
*map->map_size = *map->desc_size * 32;
*map->buff_size = *map->map_size;
again: again:
/*
* Add an additional efi_memory_desc_t because we're doing an
* allocation which may be in a new descriptor region.
*/
*map_size += sizeof(*m);
status = efi_call_early(allocate_pool, EFI_LOADER_DATA, status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
*map_size, (void **)&m); *map->map_size, (void **)&m);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
goto fail; goto fail;
*desc_size = 0; *map->desc_size = 0;
key = 0; key = 0;
status = efi_call_early(get_memory_map, map_size, m, status = efi_call_early(get_memory_map, map->map_size, m,
&key, desc_size, &desc_version); &key, map->desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) { if (status == EFI_BUFFER_TOO_SMALL ||
!mmap_has_headroom(*map->buff_size, *map->map_size,
*map->desc_size)) {
efi_call_early(free_pool, m); efi_call_early(free_pool, m);
/*
* Make sure there are some entries of headroom so that the
* buffer can be reused for a new map after allocations are
* no longer permitted. It's unlikely that the map will grow to
* exceed this headroom once we are ready to trigger
* ExitBootServices()
*/
*map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
*map->buff_size = *map->map_size;
goto again; goto again;
} }
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
efi_call_early(free_pool, m); efi_call_early(free_pool, m);
if (key_ptr && status == EFI_SUCCESS) if (map->key_ptr && status == EFI_SUCCESS)
*key_ptr = key; *map->key_ptr = key;
if (desc_ver && status == EFI_SUCCESS) if (map->desc_ver && status == EFI_SUCCESS)
*desc_ver = desc_version; *map->desc_ver = desc_version;
fail: fail:
*map = m; *map->map = m;
return status; return status;
} }
@ -113,13 +128,20 @@ fail:
unsigned long get_dram_base(efi_system_table_t *sys_table_arg) unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{ {
efi_status_t status; efi_status_t status;
unsigned long map_size; unsigned long map_size, buff_size;
unsigned long membase = EFI_ERROR; unsigned long membase = EFI_ERROR;
struct efi_memory_map map; struct efi_memory_map map;
efi_memory_desc_t *md; efi_memory_desc_t *md;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map, boot_map.map = (efi_memory_desc_t **)&map.map;
&map_size, &map.desc_size, NULL, NULL); boot_map.map_size = &map_size;
boot_map.desc_size = &map.desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
return membase; return membase;
@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align, unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max) unsigned long *addr, unsigned long max)
{ {
unsigned long map_size, desc_size; unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map; efi_memory_desc_t *map;
efi_status_t status; efi_status_t status;
unsigned long nr_pages; unsigned long nr_pages;
u64 max_addr = 0; u64 max_addr = 0;
int i; int i;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, boot_map.map = &map;
NULL, NULL); boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
goto fail; goto fail;
@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align, unsigned long size, unsigned long align,
unsigned long *addr) unsigned long *addr)
{ {
unsigned long map_size, desc_size; unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map; efi_memory_desc_t *map;
efi_status_t status; efi_status_t status;
unsigned long nr_pages; unsigned long nr_pages;
int i; int i;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, boot_map.map = &map;
NULL, NULL); boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
goto fail; goto fail;
@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
*cmd_line_len = options_bytes; *cmd_line_len = options_bytes;
return (char *)cmdline_addr; return (char *)cmdline_addr;
} }
/*
* Handle calling ExitBootServices according to the requirements set out by the
* spec. Obtains the current memory map, and returns that info after calling
* ExitBootServices. The client must specify a function to perform any
* processing of the memory map data prior to ExitBootServices. A client
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
*/
efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func)
{
efi_status_t status;
status = efi_get_memory_map(sys_table_arg, map);
if (status != EFI_SUCCESS)
goto fail;
status = priv_func(sys_table_arg, map, priv);
if (status != EFI_SUCCESS)
goto free_map;
status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
if (status == EFI_INVALID_PARAMETER) {
/*
* The memory map changed between efi_get_memory_map() and
* exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
* EFI_BOOT_SERVICES.ExitBootServices we need to get the
* updated map, and try again. The spec implies one retry
* should be sufficient, which is confirmed against the EDK2
* implementation. Per the spec, we can only invoke
* get_memory_map() and exit_boot_services() - we cannot alloc
* so efi_get_memory_map() cannot be used, and we must reuse
* the buffer. For all practical purposes, the headroom in the
* buffer should account for any changes in the map so the call
* to get_memory_map() is expected to succeed here.
*/
*map->map_size = *map->buff_size;
status = efi_call_early(get_memory_map,
map->map_size,
*map->map,
map->key_ptr,
map->desc_size,
map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
status = priv_func(sys_table_arg, map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
}
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
return EFI_SUCCESS;
free_map:
efi_call_early(free_pool, *map->map);
fail:
return status;
}
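efi_exit_boot_services() encodes the UEFI 2.6 Section 6.4 protocol: if ExitBootServices() fails with EFI_INVALID_PARAMETER, the map is refetched into the same buffer (allocating is no longer allowed, which is what the headroom slots reserved earlier guarantee) and the call is retried exactly once. The retry skeleton in isolation, as a user-space sketch with hypothetical error values:

        /* First attempt, then at most one retry; the second get_map()
         * must reuse the original buffer, hence the reserved headroom. */
        static int exit_boot_once(int (*get_map)(void), int (*exit_bs)(void))
        {
                int status = exit_bs();

                if (status == -22) {            /* "map changed" analogue */
                        if (get_map())          /* refetch into same buffer */
                                return -1;      /* nothing may be freed now */
                        status = exit_bs();     /* exactly one retry */
                }
                return status;
        }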

View File

@ -152,6 +152,27 @@ fdt_set_fail:
#define EFI_FDT_ALIGN EFI_PAGE_SIZE #define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif #endif
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
struct exit_boot_struct *p = priv;
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight to SetVirtualAddressMap()
*/
efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
p->runtime_map, p->runtime_entry_count);
return EFI_SUCCESS;
}
/* /*
* Allocate memory for a new FDT, then add EFI, commandline, and * Allocate memory for a new FDT, then add EFI, commandline, and
* initrd related fields to the FDT. This routine increases the * initrd related fields to the FDT. This routine increases the
@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr, unsigned long fdt_addr,
unsigned long fdt_size) unsigned long fdt_size)
{ {
unsigned long map_size, desc_size; unsigned long map_size, desc_size, buff_size;
u32 desc_ver; u32 desc_ver;
unsigned long mmap_key; unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map; efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size; unsigned long new_fdt_size;
efi_status_t status; efi_status_t status;
int runtime_entry_count = 0; int runtime_entry_count = 0;
struct efi_boot_memmap map;
struct exit_boot_struct priv;
map.map = &runtime_map;
map.map_size = &map_size;
map.desc_size = &desc_size;
map.desc_ver = &desc_ver;
map.key_ptr = &mmap_key;
map.buff_size = &buff_size;
/* /*
* Get a copy of the current memory map that we will use to prepare * Get a copy of the current memory map that we will use to prepare
@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect * subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions. * the number of EFI_MEMORY_RUNTIME regions.
*/ */
status = efi_get_memory_map(sys_table, &runtime_map, &map_size, status = efi_get_memory_map(sys_table, &map);
&desc_size, &desc_ver, &mmap_key);
if (status != EFI_SUCCESS) { if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status; return status;
@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi(sys_table, pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n"); "Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
/* /*
* Estimate size of new FDT, and allocate memory for it. We * Estimate size of new FDT, and allocate memory for it. We
* will allocate a bigger buffer if this ends up being too * will allocate a bigger buffer if this ends up being too
@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* we can get the memory map key needed for * we can get the memory map key needed for
* exit_boot_services(). * exit_boot_services().
*/ */
status = efi_get_memory_map(sys_table, &memory_map, &map_size, status = efi_get_memory_map(sys_table, &map);
&desc_size, &desc_ver, &mmap_key);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
goto fail_free_new_fdt; goto fail_free_new_fdt;
@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
} }
} }
/* sys_table->boottime->free_pool(memory_map);
* Update the memory map with virtual addresses. The function will also priv.runtime_map = runtime_map;
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME priv.runtime_entry_count = &runtime_entry_count;
* entries so that we can pass it straight into SetVirtualAddressMap() status = efi_exit_boot_services(sys_table, handle, &map, &priv,
*/ exit_boot_func);
efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
&runtime_entry_count);
/* Now we are ready to exit_boot_services.*/
status = sys_table->boottime->exit_boot_services(handle, mmap_key);
if (status == EFI_SUCCESS) { if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam; efi_set_virtual_address_map_t *svam;

View File

@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long random_seed) unsigned long random_seed)
{ {
unsigned long map_size, desc_size, total_slots = 0, target_slot; unsigned long map_size, desc_size, total_slots = 0, target_slot;
unsigned long buff_size;
efi_status_t status; efi_status_t status;
efi_memory_desc_t *memory_map; efi_memory_desc_t *memory_map;
int map_offset; int map_offset;
struct efi_boot_memmap map;
status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size, map.map = &memory_map;
&desc_size, NULL, NULL); map.map_size = &map_size;
map.desc_size = &desc_size;
map.desc_ver = NULL;
map.key_ptr = NULL;
map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &map);
if (status != EFI_SUCCESS) if (status != EFI_SUCCESS)
return status; return status;

View File

@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
} }
void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{ {
struct atmel_hlcdc_crtc_state *state; struct atmel_hlcdc_crtc_state *state;

View File

@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
u32 *coeff_tab = heo_upscaling_ycoef; u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize; u32 max_memsize;
if (state->crtc_w < state->src_w) if (state->crtc_h < state->src_h)
coeff_tab = heo_downscaling_ycoef; coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++) for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer, atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i, 33 + i,
0xffffffff, 0xffffffff,
coeff_tab[i]); coeff_tab[i]);
factor = ((8 * 256 * state->src_w) - (256 * 4)) / factor = ((8 * 256 * state->src_h) - (256 * 4)) /
state->crtc_w; state->crtc_h;
factor++; factor++;
max_memsize = ((factor * state->crtc_w) + (256 * 4)) / max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
2048; 2048;
if (max_memsize > state->src_w) if (max_memsize > state->src_h)
factor--; factor--;
factor_reg |= (factor << 16) | 0x80000000; factor_reg |= (factor << 16) | 0x80000000;
} }
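The vertical scaler was being programmed from the horizontal dimensions; the fix derives the Y coefficients and scaling factor from src_h/crtc_h throughout, which only matters once a plane scales differently on the two axes. The corrected factor computation extracted into a sketch, with heo_yfactor() as a hypothetical helper:

        /* Vertical scaling factor as now computed from heights; using
         * widths broke any plane with asymmetric scaling. */
        static unsigned int heo_yfactor(unsigned int src_h, unsigned int crtc_h)
        {
                unsigned int factor, max_memsize;

                factor = ((8 * 256 * src_h) - (256 * 4)) / crtc_h;
                factor++;
                max_memsize = ((factor * crtc_h) + (256 * 4)) / 2048;
                if (max_memsize > src_h)        /* clamp on overshoot */
                        factor--;
                return factor;
        }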

View File

@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv); intel_runtime_pm_enable(dev_priv);
/* Everything is in place, we can now relax! */
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
driver.date, pci_name(pdev), dev_priv->drm.primary->index);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
return 0; return 0;

View File

@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
if (intel_vgpu_active(dev_priv)) if (intel_vgpu_active(dev_priv)) {
has_full_ppgtt = false; /* emulation is too hard */ /* emulation is too hard */
has_full_ppgtt = false;
has_full_48bit_ppgtt = false;
}
if (!has_aliasing_ppgtt) if (!has_aliasing_ppgtt)
return 0; return 0;
@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0; return 0;
} }
if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
return has_full_48bit_ppgtt ? 3 : 2; return has_full_48bit_ppgtt ? 3 : 2;
else else
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;

View File

@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
if (!IS_HASWELL(dev_priv))
return;
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC) if (magic != VGT_MAGIC)
return; return;

View File

@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
return; return;
} }
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo); kfree(intel_dvo);
kfree(intel_connector); kfree(intel_connector);
} }

View File

@ -1047,6 +1047,23 @@ err_out:
return err; return err;
} }
static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
{
DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_use_opregion_panel_type[] = {
{
.callback = intel_use_opregion_panel_type_callback,
.ident = "Conrac GmbH IX45GM2",
.matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
},
},
{ }
};
int int
intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{ {
@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
return -ENODEV; return -ENODEV;
} }
/*
* So far we know that some machines must use it, others must not use it.
* There doesn't seem to be any way to determine which way to go, except
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
/* /*
* FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
* low vswing for eDP, whereas the VBT panel type (2) gives us normal * low vswing for eDP, whereas the VBT panel type (2) gives us normal
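dmi_check_system() walks a NULL-terminated table of match entries and invokes each callback on a hit, returning the number of matches; that count is how this single-machine quirk gates the OpRegion panel type. The walk in miniature, where struct quirk and check_system() are hypothetical simplifications:

        #include <string.h>

        struct quirk {
                const char *vendor;
                int (*cb)(void);
        };

        /* Count entries whose match strings fit this system and whose
         * callback claims the match, as dmi_check_system() does. */
        static int check_system(const struct quirk *q, const char *vendor)
        {
                int hits = 0;

                for (; q->vendor; q++)
                        if (!strcmp(q->vendor, vendor) && q->cb())
                                hits++;
                return hits;
        }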

View File

@ -7859,6 +7859,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_ILLEGAL_CMD: case GEN6_PCODE_ILLEGAL_CMD:
return -ENXIO; return -ENXIO;
case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW; return -EOVERFLOW;
case GEN6_PCODE_TIMEOUT: case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT; return -ETIMEDOUT;
