Merge branch 'linus' into perf/kprobes

Merge recent kprobes updates that came from -mm into perf/kprobes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2020-11-07 13:18:49 +01:00
commit 0a986ea81e
258 changed files with 2838 additions and 1094 deletions


@ -308,6 +308,7 @@ Tony Luck <tony.luck@intel.com>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>


@ -156,7 +156,6 @@ against. Possible keywords are:::
``line-range`` cannot contain space, e.g.
"1-30" is valid range but "1 - 30" is not.
``module=foo`` combined keyword=value form is interchangeably accepted
The meanings of each keyword are:


@ -23,8 +23,8 @@ Required properties:
- compatible:
Must be one of :
"brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs
"brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
"brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on BRCMSTB SoCs
"brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi" : Second Instance of MSPI
BRCMSTB SoCs
"brcm,spi-bcm7425-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
@ -36,8 +36,8 @@ Required properties:
BRCMSTB SoCs
"brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
"brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi" : MSPI+BSPI on Cygnus, NSP
"brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi" : NS2 SoCs
"brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on Cygnus, NSP
"brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi" : NS2 SoCs
- reg:
Define the bases and ranges of the associated I/O address spaces.
@ -86,7 +86,7 @@ BRCMSTB SoC Example:
spi@f03e3400 {
#address-cells = <0x1>;
#size-cells = <0x0>;
compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi";
compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi";
reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>;
reg-names = "cs_reg", "mspi", "bspi";
interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>;
@ -149,7 +149,7 @@ BRCMSTB SoC Example:
#address-cells = <1>;
#size-cells = <0>;
clocks = <&upg_fixed>;
compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi";
compatible = "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi";
reg = <0xf0416000 0x180>;
reg-names = "mspi";
interrupts = <0x14>;
@ -160,7 +160,7 @@ BRCMSTB SoC Example:
iProc SoC Example:
qspi: spi@18027200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18027200 0x184>,
<0x18027000 0x124>,
<0x1811c408 0x004>,
@ -191,7 +191,7 @@ iProc SoC Example:
NS2 SoC Example:
qspi: spi@66470200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,


@ -179,7 +179,7 @@ DMA Fence uABI/Sync File
:internal:
Indefinite DMA Fences
~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~
At various times, &dma_fence objects with an indefinite time until dma_fence_wait()
finishes have been proposed. Examples include:


@ -61,6 +61,10 @@ These options can be used for each instance including global ftrace node.
ftrace.[instance.INSTANCE.]options = OPT1[, OPT2[...]]
Enable given ftrace options.
ftrace.[instance.INSTANCE.]tracing_on = 0|1
Enable/Disable tracing on this instance when starting boot-time tracing.
(you can still enable it later with the "traceon" event trigger action)
ftrace.[instance.INSTANCE.]trace_clock = CLOCK
Set given CLOCK to ftrace's trace_clock.
@ -164,6 +168,26 @@ is for tracing functions starting with "user\_", and others tracing
The instance node also accepts event nodes so that each instance
can customize its event tracing.
With the trigger action and kprobes, you can trace the function graph only
while a particular function runs. For example, this traces all function calls
made inside pci_proc_init()::
ftrace {
tracing_on = 0
tracer = function_graph
event.kprobes {
start_event {
probes = "pci_proc_init"
actions = "traceon"
}
end_event {
probes = "pci_proc_init%return"
actions = "traceoff"
}
}
}
This boot-time tracing also supports ftrace kernel parameters via boot
config.
For example, the following kernel parameters::


@ -30,6 +30,7 @@ Synopsis of kprobe_events
p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
p[:[GRP/]EVENT] [MOD:]SYM[+0]%return [FETCHARGS] : Set a return probe
-:[GRP/]EVENT : Clear a probe
GRP : Group name. If omitted, use "kprobes" for it.
@ -37,6 +38,7 @@ Synopsis of kprobe_events
based on SYM+offs or MEMADDR.
MOD : Module name which has given SYM.
SYM[+offs] : Symbol+offset where the probe is inserted.
SYM%return : Return address of the symbol
MEMADDR : Address where the probe is inserted.
MAXACTIVE : Maximum number of instances of the specified function that
can be probed simultaneously, or 0 for the default value
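For example, the new ``SYM%return`` form makes a ``p`` definition behave like
an ``r`` return probe. A minimal user-space sketch in C (the event name
"myretprobe" and the tracefs mount point are assumptions, not part of this
change)::

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Register a return probe via the new %return suffix. */
            int fd = open("/sys/kernel/tracing/kprobe_events",
                          O_WRONLY | O_APPEND);
            const char ev[] = "p:myretprobe do_sys_open%return $retval\n";

            if (fd < 0 || write(fd, ev, strlen(ev)) < 0)
                    return 1;
            return close(fd) != 0;
    }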


@ -28,6 +28,7 @@ Synopsis of uprobe_tracer
p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a uprobe
r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return uprobe (uretprobe)
p[:[GRP/]EVENT] PATH:OFFSET%return [FETCHARGS] : Set a return uprobe (uretprobe)
-:[GRP/]EVENT : Clear uprobe or uretprobe event
GRP : Group name. If omitted, "uprobes" is the default value.
@ -35,6 +36,7 @@ Synopsis of uprobe_tracer
on PATH+OFFSET.
PATH : Path to an executable or a library.
OFFSET : Offset where the probe is inserted.
OFFSET%return : Offset where the return probe is inserted.
FETCHARGS : Arguments. Each probe can have up to 128 args.
%REG : Fetch register REG
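The ``OFFSET%return`` suffix works the same way for user-space targets. A
hedged fragment (the binary path and the 0x4710 offset are illustrative
placeholders only; error handling omitted)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Append a uretprobe-style event; %%return emits a literal %return. */
    int fd = open("/sys/kernel/tracing/uprobe_events", O_WRONLY | O_APPEND);
    dprintf(fd, "p:myuretprobe /bin/bash:0x4710%%return\n");
    close(fd);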


@ -6130,7 +6130,7 @@ HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
-----------------------------------
:Architecture: x86
:Architectures: x86
This capability indicates that KVM running on top of Hyper-V hypervisor
enables Direct TLB flush for its guests meaning that TLB flush
@ -6143,19 +6143,33 @@ in CPUID and only exposes Hyper-V identification. In this case, guest
thinks it is running on Hyper-V and only uses Hyper-V hypercalls.
8.22 KVM_CAP_S390_VCPU_RESETS
-----------------------------
Architectures: s390
:Architectures: s390
This capability indicates that the KVM_S390_NORMAL_RESET and
KVM_S390_CLEAR_RESET ioctls are available.
8.23 KVM_CAP_S390_PROTECTED
---------------------------
Architecture: s390
:Architectures: s390
This capability indicates that the Ultravisor has been initialized and
KVM can therefore start protected VMs.
This capability governs the KVM_S390_PV_COMMAND ioctl and the
KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
guests when the state change is invalid.
8.24 KVM_CAP_STEAL_TIME
-----------------------
:Architectures: arm64, x86
This capability indicates that KVM supports steal time accounting.
When steal time accounting is supported it may be enabled with
architecture-specific interfaces. This capability and the architecture-
specific interfaces must be consistent, i.e. if one says the feature
is supported, then the other should as well, and vice versa. For arm64
see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
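User space can probe for this capability with KVM_CHECK_EXTENSION before
enabling the architecture-specific interface. A minimal sketch, assuming
headers from a kernel that defines KVM_CAP_STEAL_TIME::

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int kvm_fd = open("/dev/kvm", O_RDWR);
    /* A positive return value means steal time accounting is available. */
    int has_steal_time = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);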


@ -6601,6 +6601,7 @@ F: fs/proc/bootconfig.c
F: include/linux/bootconfig.h
F: lib/bootconfig.c
F: tools/bootconfig/*
F: tools/bootconfig/scripts/*
EXYNOS DP DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
@ -6901,6 +6902,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
FREESCALE DSPI DRIVER
M: Vladimir Oltean <olteanv@gmail.com>
L: linux-spi@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
F: drivers/spi/spi-fsl-dspi.c
F: include/linux/spi/spi-fsl-dspi.h
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@ -9792,7 +9801,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tobin C. Harding <me@tobin.cc>
M: Tycho Andersen <tycho@tycho.ws>
M: Tycho Andersen <tycho@tycho.pizza>
L: kernel-hardening@lists.openwall.com
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git
@ -15569,6 +15578,7 @@ F: include/uapi/linux/sed*
SECURITY CONTACT
M: Security Officers <security@kernel.org>
S: Supported
F: Documentation/admin-guide/security-bugs.rst
SECURITY SUBSYSTEM
M: James Morris <jmorris@namei.org>


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -882,10 +882,6 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
ifdef CONFIG_LIVEPATCH
KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
endif
ifdef CONFIG_SHADOW_CALL_STACK
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)


@ -217,7 +217,7 @@
};
qspi: spi@27200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,


@ -284,7 +284,7 @@
};
qspi: spi@27200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,


@ -488,7 +488,7 @@
};
spi@18029200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,


@ -13,7 +13,7 @@
backlight: backlight-lvds {
compatible = "pwm-backlight";
pwms = <&pwm3 0 20000>;
pwms = <&pwm3 0 20000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_lcd>;


@ -30,7 +30,7 @@
};
/* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */
i2c@4 {
i2c {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;


@ -22,8 +22,6 @@
gpio-keys {
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
user-pb {
label = "user_pb";


@ -1026,7 +1026,7 @@
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1


@ -58,7 +58,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&fec1_phy>;
status = "okay";


@ -394,7 +394,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLC>;
clock-names = "gpio", "port";
gpio-ranges = <&iomuxc1 0 0 32>;
gpio-ranges = <&iomuxc1 0 0 20>;
};
gpio_ptd: gpio@40af0000 {
@ -408,7 +408,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLD>;
clock-names = "gpio", "port";
gpio-ranges = <&iomuxc1 0 32 32>;
gpio-ranges = <&iomuxc1 0 32 12>;
};
gpio_pte: gpio@40b00000 {
@ -422,7 +422,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLE>;
clock-names = "gpio", "port";
gpio-ranges = <&iomuxc1 0 64 32>;
gpio-ranges = <&iomuxc1 0 64 16>;
};
gpio_ptf: gpio@40b10000 {
@ -436,7 +436,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLF>;
clock-names = "gpio", "port";
gpio-ranges = <&iomuxc1 0 96 32>;
gpio-ranges = <&iomuxc1 0 96 20>;
};
};


@ -51,6 +51,8 @@
&mcbsp2 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
@ -102,35 +104,18 @@
regulator-max-microvolt = <3300000>;
};
lcd0: display@0 {
compatible = "panel-dpi";
label = "28";
status = "okay";
/* default-on; */
lcd0: display {
/* This isn't the exact LCD, but the timings meet spec */
compatible = "logicpd,type28";
pinctrl-names = "default";
pinctrl-0 = <&lcd_enable_pin>;
enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
backlight = <&bl>;
enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
panel-timing {
clock-frequency = <9000000>;
hactive = <480>;
vactive = <272>;
hfront-porch = <3>;
hback-porch = <2>;
hsync-len = <42>;
vback-porch = <3>;
vfront-porch = <2>;
vsync-len = <11>;
hsync-active = <1>;
vsync-active = <1>;
de-active = <1>;
pixelclk-active = <0>;
};
};
bl: backlight {


@ -81,6 +81,8 @@
};
&mcbsp2 {
pinctrl-names = "default";
pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};


@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
<0x0 0x40000000 0x0 0x40000000>;
<0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";


@ -488,11 +488,11 @@
};
};
target-module@5000 {
target-module@4000 {
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x5000 0x4>,
<0x5010 0x4>,
<0x5014 0x4>;
reg = <0x4000 0x4>,
<0x4010 0x4>,
<0x4014 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
@ -504,7 +504,7 @@
ti,syss-mask = <1>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x5000 0x1000>;
ranges = <0 0x4000 0x1000>;
dsi1: encoder@0 {
compatible = "ti,omap5-dsi";
@ -514,8 +514,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
};
};
@ -545,8 +546,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
<&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
clock-names = "fck", "sys_clk";
};
};


@ -821,7 +821,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffd01000 0x100>;
reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
resets = <&rst L4SYSTIMER1_RESET>;


@ -495,7 +495,7 @@
};
ocotp: ocotp@400a5000 {
compatible = "fsl,vf610-ocotp";
compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};


@ -1,13 +1,11 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_INTEGRATOR_IMPD1=y
CONFIG_ARCH_INTEGRATOR_CP=y
CONFIG_PCI=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
CONFIG_CMA=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@ -37,6 +33,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_E100=y
CONFIG_SMC91X=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y


@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev)
return pwrdm;
clk = of_clk_get(dev->of_node->parent, 0);
if (!clk) {
if (IS_ERR(clk)) {
dev_err(dev, "no fck found\n");
return NULL;
}


@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,


@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb


@ -702,7 +702,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
<&clk IMX8MP_CLK_SDMA1_ROOT>;
<&clk IMX8MP_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";


@ -423,7 +423,7 @@
tmu: tmu@30260000 {
compatible = "fsl,imx8mq-tmu";
reg = <0x30260000 0x10000>;
interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
little-endian;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;


@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@ -558,6 +559,15 @@
};
};
psgtr: phy@fd400000 {
compatible = "xlnx,zynqmp-psgtr-v1.1";
status = "disabled";
reg = <0x0 0xfd400000 0x0 0x40000>,
<0x0 0xfd3d0000 0x0 0x1000>;
reg-names = "serdes", "siou";
#phy-cells = <4>;
};
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@ -601,7 +611,7 @@
power-domains = <&zynqmp_firmware PD_SD_1>;
};
smmu: smmu@fd800000 {
smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
status = "disabled";


@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
CONFIG_USB_TEGRA_XUDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
CONFIG_USB_CONFIGFS_OBEX=y
CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_ECM=y
CONFIG_USB_CONFIGFS_ECM_SUBSET=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_EEM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_FUSB302=m
@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_K3_AM6_SOC=y
CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_IIO=y


@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,


@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
case KVM_CAP_STEAL_TIME:
r = kvm_arm_pvtime_supported();
break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;


@ -1877,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
vma_shift = PAGE_SHIFT;
}
/*
@ -1970,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
if (vma_pagesize == PUD_SIZE) {
/*
* If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
* all we have is a 2-level page table. Trying to map a PUD in
* this case would be fatally wrong.
*/
if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);


@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
u64 steal;
__le64 steal_le;
u64 offset;
int idx;
u64 base = vcpu->arch.steal.base;
u64 last_steal = vcpu->arch.steal.last_steal;
u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
u64 steal = 0;
int idx;
if (base == GPA_INVALID)
return;
/* Let's do the local bookkeeping */
steal = vcpu->arch.steal.steal;
steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
vcpu->arch.steal.steal = steal;
steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
kvm_put_guest(kvm, base + offset, steal_le, u64);
if (!kvm_get_guest(kvm, base + offset, steal)) {
steal = le64_to_cpu(steal);
vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
steal += vcpu->arch.steal.last_steal - last_steal;
kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
}
srcu_read_unlock(&kvm->srcu, idx);
}
@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
val = SMCCC_RET_SUCCESS;
if (vcpu->arch.steal.base != GPA_INVALID)
val = SMCCC_RET_SUCCESS;
break;
}
@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
static bool kvm_arm_pvtime_supported(void)
bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}


@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,


@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
TP_printk("guest executed wf%c at: 0x%08lx",
TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,


@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
case KVM_VM_MIPS_AUTO:
break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else


@ -165,19 +165,19 @@ struct __large_struct {
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
long __gu_err; \
__get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(__gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (access_ok(__gu_addr, size)) \
__get_user_size((x), __gu_addr, (size), __gu_err); \
else \
(x) = (__typeof__(*(ptr))) 0; \
__gu_err; \
})
@ -191,11 +191,13 @@ do { \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
{ \
unsigned long __gu_tmp; \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
"2:\n" \
@ -209,10 +211,14 @@ do { \
" .align 2\n" \
" .long 1b,3b\n" \
".previous" \
: "=r"(err), "=r"(x) \
: "r"(addr), "i"(-EFAULT), "0"(err))
: "=r"(err), "=r"(__gu_tmp) \
: "r"(addr), "i"(-EFAULT), "0"(err)); \
(x) = (__typeof__(*(addr)))__gu_tmp; \
}
#define __get_user_asm2(x, addr, err) \
{ \
unsigned long long __gu_tmp; \
__asm__ __volatile__( \
"1: l.lwz %1,0(%2)\n" \
"2: l.lwz %H1,4(%2)\n" \
@ -229,8 +235,11 @@ do { \
" .long 1b,4b\n" \
" .long 2b,4b\n" \
".previous" \
: "=r"(err), "=&r"(x) \
: "r"(addr), "i"(-EFAULT), "0"(err))
: "=r"(err), "=&r"(__gu_tmp) \
: "r"(addr), "i"(-EFAULT), "0"(err)); \
(x) = (__typeof__(*(addr)))( \
(__typeof__((x)-(x)))__gu_tmp); \
}
/* more complex routines */


@ -80,6 +80,16 @@ static void __init setup_memory(void)
*/
memblock_reserve(__pa(_stext), _end - _stext);
#ifdef CONFIG_BLK_DEV_INITRD
/* Then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start)) {
unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
}
#endif /* CONFIG_BLK_DEV_INITRD */
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();


@ -16,7 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static void cache_loop(struct page *page, const unsigned int reg)
static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);


@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
irqentry_state_t state;
state = irqentry_enter(regs);
ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
@ -654,7 +652,6 @@ static void __init kvm_guest_init(void)
}
if (pv_tlb_flush_supported()) {
pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
pr_info("KVM setup pv remote TLB flush\n");
}
@ -767,6 +764,14 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
static void kvm_free_pv_cpu_mask(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
}
static __init int kvm_alloc_cpumask(void)
{
int cpu;
@ -785,11 +790,20 @@ static __init int kvm_alloc_cpumask(void)
if (alloc)
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (!zalloc_cpumask_var_node(
per_cpu_ptr(&__pv_cpu_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu))) {
goto zalloc_cpumask_fail;
}
}
apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
return 0;
zalloc_cpumask_fail:
kvm_free_pv_cpu_mask();
return -ENOMEM;
}
arch_initcall(kvm_alloc_cpumask);


@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
return X86EMUL_UNHANDLEABLE;
val = GET_SMSTATE(u32, smstate, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
return X86EMUL_UNHANDLEABLE;
val = GET_SMSTATE(u32, smstate, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);


@ -2469,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);


@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
/* On vmexit the GIF is set to false */
svm_set_gif(svm, false);
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
load_nested_vmcb_control(svm, &ctl);
nested_prepare_vmcb_control(svm);
if (!nested_svm_vmrun_msrpm(svm))
return -EINVAL;
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;


@ -1106,6 +1106,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
cond_resched();
}
}


@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
svm_complete_interrupts(svm);
if (is_guest_mode(vcpu)) {
int vmexit;
@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
vmcb_mark_all_clean(svm->vmcb);
svm_complete_interrupts(svm);
exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
struct kvm_host_map map;
u64 guest;
u64 vmcb;
int ret = 0;
guest = GET_SMSTATE(u64, smstate, 0x7ed8);
vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest) {
if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
return 1;
nested_vmcb = map.hva;
ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
kvm_vcpu_unmap(&svm->vcpu, &map, true);
if (guest) {
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
return 1;
if (!(saved_efer & EFER_SVME))
return 1;
if (kvm_vcpu_map(&svm->vcpu,
gpa_to_gfn(vmcb), &map) == -EINVAL)
return 1;
ret = enter_svm_guest_mode(svm, vmcb, map.hva);
kvm_vcpu_unmap(&svm->vcpu, &map, true);
}
}
return ret;


@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
/*
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
* now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
* up-to-date before switching to L1.
*/
if (enable_ept && is_pae_paging(vcpu))
vmx_ept_load_pdptrs(vcpu);
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}


@ -2971,7 +2971,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@ -3114,7 +3114,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
ept_load_pdptrs(vcpu);
vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@ -6054,6 +6054,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;


@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1


@ -2731,7 +2731,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
return 1;
return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@ -3578,6 +3578,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
case KVM_CAP_STEAL_TIME:
r = sched_info_on();
break;
default:
break;
}


@ -5895,18 +5895,6 @@ static void bfq_finish_requeue_request(struct request *rq)
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd;
/*
* Requeue and finish hooks are invoked in blk-mq without
* checking whether the involved request is actually still
* referenced in the scheduler. To handle this fact, the
* following two checks make this function exit in case of
* spurious invocations, for which there is nothing to do.
*
* First, check whether rq has nothing to do with an elevator.
*/
if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
return;
/*
* rq either is not associated with any icq, or is an already
* requeued request that has not (yet) been re-inserted into


@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
if (bio->bi_iter.bi_size > UINT_MAX - len)
if (bio->bi_iter.bi_size > UINT_MAX - len) {
*same_page = false;
return false;
}
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;


@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.requeue_request)
if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
e->type->ops.requeue_request(rq);
}


@ -537,7 +537,7 @@ int bdev_del_partition(struct block_device *bdev, int partno)
bdevp = bdget_disk(bdev->bd_disk, partno);
if (!bdevp)
return -ENOMEM;
return -ENXIO;
mutex_lock(&bdevp->bd_mutex);
mutex_lock_nested(&bdev->bd_mutex, 1);


@ -807,9 +807,7 @@ static void device_link_put_kref(struct device_link *link)
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
device_link_put_kref(link);
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
@ -830,7 +828,6 @@ void device_link_remove(void *consumer, struct device *supplier)
return;
device_links_write_lock();
device_pm_lock();
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
@ -839,7 +836,6 @@ void device_link_remove(void *consumer, struct device *supplier)
}
}
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
@ -4237,10 +4233,10 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
vaf.va = &args;
if (err != -EPROBE_DEFER) {
dev_err(dev, "error %d: %pV", err, &vaf);
dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
} else {
device_set_deferred_probe_reason(dev, &vaf);
dev_dbg(dev, "error %d: %pV", err, &vaf);
dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
}
va_end(args);


@ -142,10 +142,12 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
#endif /* __FIRMWARE_LOADER_H */


@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref)
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
fw_free_paged_buf(fw_priv); /* free leftover pages */
if (!fw_priv->allocated_size)
if (fw_is_paged_buf(fw_priv))
fw_free_paged_buf(fw_priv);
else if (!fw_priv->allocated_size)
vfree(fw_priv->data);
kfree_const(fw_priv->fw_name);
kfree(fw_priv);
}
@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv)
}
#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
return fw_priv->is_paged_buf;
}
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
int i;
@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->pages)
return;
vunmap(fw_priv->data);
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kvfree(fw_priv->pages);
@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->data)
return -ENOMEM;
/* page table is no longer needed after mapping, let's free */
kvfree(fw_priv->pages);
fw_priv->pages = NULL;
return 0;
}
#endif


@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_client *rbdc;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
bool force = false;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);


@ -320,8 +320,8 @@ static int mchp_tc_probe(struct platform_device *pdev)
}
regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/* max. channels number is 2 when in QDEC mode */
priv->num_channels = of_property_count_u32_elems(np, "reg");


@ -100,7 +100,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
if (!dax_dev && !bdev_dax_supported(bdev, blocksize)) {
if (!dax_dev || !bdev_dax_supported(bdev, blocksize)) {
pr_debug("%s: error: dax unsupported by block device\n",
bdevname(bdev, buf));
return false;


@ -316,9 +316,9 @@ out:
* name of the dma-buf if the same piece of memory is used for multiple
* purpose between different devices.
*
* @dmabuf [in] dmabuf buffer that will be renamed.
* @buf: [in] A piece of userspace memory that contains the name of
* the dma-buf.
* @dmabuf: [in] dmabuf buffer that will be renamed.
* @buf: [in] A piece of userspace memory that contains the name of
* the dma-buf.
*
* Returns 0 on success. If the dma-buf buffer is already attached to
* devices, return -EBUSY.


@ -222,6 +222,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
* @chain: the chain node to initialize
* @prev: the previous fence
* @fence: the current fence
* @seqno: the sequence number to use for the fence chain
*
* Initialize a new chain node and either start a new chain or add the node to
* the existing chain of the previous fence.
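A hedged caller-side sketch of the documented signature (``prev``, ``fence``
and ``seqno`` are assumed to be supplied by the caller)::

    /* Allocate a chain node and link 'fence' behind 'prev' at 'seqno'. */
    struct dma_fence_chain *chain = kmalloc(sizeof(*chain), GFP_KERNEL);

    if (chain)
            dma_fence_chain_init(chain, prev, fence, seqno);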


@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
EXPORT_SYMBOL_GPL(efi_embedded_fw_list);
static bool checked_for_fw;
EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
bool efi_embedded_fw_checked;
EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
@ -116,14 +116,14 @@ void __init efi_check_for_embedded_firmwares(void)
}
}
checked_for_fw = true;
efi_embedded_fw_checked = true;
}
int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
{
struct efi_embedded_fw *iter, *fw = NULL;
if (!checked_for_fw) {
if (!efi_embedded_fw_checked) {
pr_warn("Warning %s called while we did not check for embedded fw\n",
__func__);
return -ENOENT;


@ -14956,12 +14956,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (dev_priv->wm.distrust_bios_wm)
any_ms = true;
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
}
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@ -14976,6 +14970,10 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;


@ -45,6 +45,13 @@ struct eb_vma_array {
struct eb_vma vma[];
};
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@ -253,6 +260,8 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
unsigned long vaddr; /** Current kmap address */
unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@ -596,6 +605,23 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
const struct drm_i915_gem_object *obj)
{
if (!i915_gem_object_has_struct_page(obj))
return false;
if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
return true;
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
return false;
return (cache->has_llc ||
obj->cache_dirty ||
obj->cache_level != I915_CACHE_NONE);
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@ -926,6 +952,8 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
cache->page = -1;
cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@ -937,6 +965,25 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
static inline void *unmask_page(unsigned long p)
{
return (void *)(uintptr_t)(p & PAGE_MASK);
}
static inline unsigned int unmask_flags(unsigned long p)
{
return p & ~PAGE_MASK;
}
#define KMAP 0x4 /* after CLFLUSH_FLAGS */
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
struct drm_i915_private *i915 =
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
return &i915->ggtt;
}
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@ -1049,6 +1096,181 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
static void reloc_cache_reset(struct reloc_cache *cache)
{
void *vaddr;
if (!cache->vaddr)
return;
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP) {
if (cache->vaddr & CLFLUSH_AFTER)
mb();
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
}
cache->vaddr = 0;
cache->page = -1;
}
static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
void *vaddr;
if (cache->vaddr) {
kunmap_atomic(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
err = i915_gem_object_prepare_write(obj, &flushes);
if (err)
return ERR_PTR(err);
BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
cache->vaddr = flushes | KMAP;
cache->node.mm = (void *)obj;
if (flushes)
mb();
}
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = page;
return vaddr;
}
static void *reloc_iomap(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
void *vaddr;
if (cache->vaddr) {
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
if (i915_gem_object_is_tiled(obj))
return ERR_PTR(-EINVAL);
if (use_cpu_reloc(cache, obj))
return NULL;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return ERR_PTR(err);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
}
offset = cache->node.start;
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
return vaddr;
}
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
void *vaddr;
if (cache->page == page) {
vaddr = unmask_page(cache->vaddr);
} else {
vaddr = NULL;
if ((cache->vaddr & KMAP) == 0)
vaddr = reloc_iomap(obj, cache, page);
if (!vaddr)
vaddr = reloc_kmap(obj, cache, page);
}
return vaddr;
}
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
if (flushes & CLFLUSH_BEFORE) {
clflushopt(addr);
mb();
}
*addr = value;
/*
* Writes to the same cacheline are serialised by the CPU
* (including clflush). On the write path, we only require
* that it hits memory in an orderly fashion and place
* mb barriers at the start and end of the relocation phase
* to ensure ordering of clflush wrt to the system.
*/
if (flushes & CLFLUSH_AFTER)
clflushopt(addr);
} else
*addr = value;
}
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@ -1214,6 +1436,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
static inline bool use_reloc_gpu(struct i915_vma *vma)
{
if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
return true;
if (DBG_FORCE_RELOC)
return false;
return !dma_resv_test_signaled_rcu(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@ -1228,10 +1461,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
static int __reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@ -1247,7 +1480,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
return PTR_ERR(batch);
return false;
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@ -1296,21 +1529,55 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
return 0;
return true;
}
static bool reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
{
if (eb->reloc_cache.vaddr)
return false;
if (!use_reloc_gpu(vma))
return false;
return __reloc_entry_gpu(eb, vma, offset, target_addr);
}
static u64
relocate_entry(struct i915_execbuffer *eb,
struct i915_vma *vma,
relocate_entry(struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
int err;
u64 offset = reloc->offset;
err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
if (err)
return err;
if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
bool wide = eb->reloc_cache.use_64bit_reloc;
void *vaddr;
repeat:
vaddr = reloc_vaddr(vma->obj,
&eb->reloc_cache,
offset >> PAGE_SHIFT);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
clflush_write32(vaddr + offset_in_page(offset),
lower_32_bits(target_addr),
eb->reloc_cache.vaddr);
if (wide) {
offset += sizeof(u32);
target_addr >>= 32;
wide = false;
goto repeat;
}
}
return target->node.start | UPDATE;
}
@ -1375,7 +1642,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
if (!DBG_FORCE_RELOC &&
gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@ -1407,7 +1675,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
return relocate_entry(eb, ev->vma, reloc, target->vma);
return relocate_entry(ev->vma, reloc, eb, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@ -1445,8 +1713,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied))
return -EFAULT;
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
}
remain -= count;
do {
@ -1454,7 +1724,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
return (int)offset;
remain = (int)offset;
goto out;
} else {
/*
* Note that reporting an error now
@ -1484,8 +1755,9 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
return 0;
out:
reloc_cache_reset(&eb->reloc_cache);
return remain;
}
static int eb_relocate(struct i915_execbuffer *eb)
@ -2392,7 +2664,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
if (!(args->flags & I915_EXEC_NO_RELOC))
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;


@ -258,6 +258,10 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n);
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,


@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n)
{
struct page *page;
page = i915_gem_object_get_page(obj, n);
if (!obj->mm.dirty)
set_page_dirty(page);
return page;
}
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,


@ -37,14 +37,20 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[0] * sizeof(u32),
0)) {
err = -EIO;
goto unpin_vma;
}
/* !8-Byte aligned */
err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[1] * sizeof(u32),
1)) {
err = -EIO;
goto unpin_vma;
}
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@ -54,9 +60,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
if (err)
if (!__reloc_entry_gpu(eb, vma,
offsets[2] * sizeof(u32),
2)) {
err = -EIO;
goto unpin_vma;
}
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);

View File

@ -673,7 +673,7 @@ static void ingenic_drm_unbind_all(void *d)
component_unbind_all(priv->dev, &priv->drm);
}
static int ingenic_drm_bind(struct device *dev)
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
const struct jz_soc_info *soc_info;
@ -808,7 +808,7 @@ static int ingenic_drm_bind(struct device *dev)
return ret;
}
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
if (ret != -EPROBE_DEFER)
@ -939,6 +939,11 @@ err_pixclk_disable:
return ret;
}
static int ingenic_drm_bind_with_components(struct device *dev)
{
return ingenic_drm_bind(dev, true);
}
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
@ -957,7 +962,7 @@ static void ingenic_drm_unbind(struct device *dev)
}
static const struct component_master_ops ingenic_master_ops = {
.bind = ingenic_drm_bind,
.bind = ingenic_drm_bind_with_components,
.unbind = ingenic_drm_unbind,
};
@ -968,16 +973,15 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct device_node *np;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
return ingenic_drm_bind(dev);
return ingenic_drm_bind(dev, false);
/* IPU is at port address 8 */
np = of_graph_get_remote_node(dev->of_node, 8, 0);
if (!np) {
dev_err(dev, "Unable to get IPU node\n");
return -EINVAL;
}
if (!np)
return ingenic_drm_bind(dev, false);
drm_of_component_match_add(dev, &match, compare_of, np);
of_node_put(np);
return component_master_add_with_match(dev, &ingenic_master_ops, match);
}

View File

@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but

View File

@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

View File

@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;

View File

@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
a5xx_preempt_hw_init(gpu);
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
a5xx_preempt_hw_init(gpu);
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
/* Restricting nr_rings to 1 to temporarily disable preemption */
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);

View File

@ -31,6 +31,7 @@ struct a5xx_gpu {
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

View File

@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
struct drm_gem_object *bo = NULL;
u64 iova = 0;
void *counters;
struct drm_gem_object *bo = NULL, *counters_bo = NULL;
u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
msm_gem_kernel_put(bo, gpu->aspace, true);
return PTR_ERR(counters);
}
msm_gem_object_set_name(bo, "preempt");
msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
ptr->counter = counters_iova;
return 0;
}
@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++)
for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
gpu->aspace, true);
}
}
void a5xx_preempt_init(struct msm_gpu *gpu)

View File

@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
if (adreno_is_a650(adreno_gpu)) {
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
if (adreno_is_a650(adreno_gpu))
adreno_gpu->base.hw_apriv = true;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));

View File

@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->memptrs->rptr = 0;
}
/*
* Setup REG_CP_RB_CNTL. The same value is used across targets (with
 * the exception of A430 that disables the RPTR shadow) - the calculation
* for the ringbuffer size and block size is moved to msm_gpu.h for the
* pre-processor to deal with and the A430 variant is ORed in here
*/
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT |
(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
rbmemptr(gpu->rb[0], rptr));
}
return 0;
}
@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
if (adreno_is_a430(adreno_gpu))
return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
return ring->memptrs->rptr;
return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)

View File

@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {

View File

@ -15,6 +15,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
@ -139,6 +140,8 @@ struct msm_gpu {
} devfreq;
struct msm_gpu_state *crashstate;
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
};
/* It turns out that all targets use the same ringbuffer size */
@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->dev->struct_mutex);
}
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges
*/
#define check_apriv(gpu, flags) \
(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
#endif /* __MSM_GPU_H__ */
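In other words, check_apriv(gpu, MSM_BO_UNCACHED) evaluates to MSM_BO_UNCACHED | MSM_BO_MAP_PRIV on hardware with expanded apriv and to plain MSM_BO_UNCACHED otherwise. A usage sketch modelled on the msm_gpu.c hunk above (sizes illustrative):

/* Illustrative allocation: privileged mapping only where supported. */
memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_rbmemptrs) * nr_rings,
			     check_apriv(gpu, MSM_BO_UNCACHED),
			     gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);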

View File

@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
&ring->iova);
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);

View File

@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
/* We can't have an alpha plane at the lowest position */
if (!backend->quirks->supports_lowest_plane_alpha &&
(plane_states[0]->fb->format->has_alpha ||
(plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
(plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
.supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {

View File

@ -1433,14 +1433,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
if (ret)
if (ret) {
put_device(&pdev->dev);
return ret;
}
}
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
if (ret)
if (ret) {
put_device(&pdev->dev);
return ret;
}
}
return 0;

View File

@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
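A worked example of why both changes are needed, with a payload size assumed for illustration: for msg->tx_len = 7 and a 2-byte CRC, len is 9, but regmap_bulk_write() now moves whole 32-bit words, so 12 bytes are read from the bounce buffer and the allocation must be padded to match:

size_t tx_len = 7;                     /* hypothetical DCS payload */
size_t len    = tx_len + sizeof(u16);  /* + CRC -> 9 bytes */
size_t alloc  = ALIGN(len, 4);         /* 12 bytes kzalloc'ed */
size_t words  = DIV_ROUND_UP(len, 4);  /* 3 words written = 12 bytes read */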

View File

@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
{
if (!format->is_yuv)
return SUN8I_CSC_MODE_OFF;

View File

@ -14,6 +14,7 @@
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/delay.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
int retries;
clk_prepare_enable(priv->clk);
/* Reset the TVE200 and wait for it to come back online */
writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
for (retries = 0; retries < 5; retries++) {
usleep_range(30000, 50000);
if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
continue;
else
break;
}
if (retries == 5 &&
readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
dev_err(drm->dev, "can't get hardware out of reset\n");
return;
}
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_vblank_off(crtc);
/* Disable and Power Down */
/* Disable, put into reset and power down */
writel(0, priv->regs + TVE200_CTRL);
writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
/* Clear any IRQs and enable */
writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
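The open-coded retry loop in tve200_display_enable() above could equally be written with the iopoll helper; a sketch, assuming sleeping is allowed at that point and keeping roughly the same 30 ms poll interval and five-attempt budget:

#include <linux/iopoll.h>

u32 val;
int ret;

/* Sketch: wait for TVE200_CTRL_4_RESET to clear, up to 250 ms. */
ret = readl_poll_timeout(priv->regs + TVE200_CTRL_4, val,
			 !(val & TVE200_CTRL_4_RESET),
			 30000, 250000);
if (ret)
	dev_err(drm->dev, "can't get hardware out of reset\n");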

View File

@ -97,9 +97,6 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@ -111,7 +108,6 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
@ -123,6 +119,17 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
/*
* virtio-gpu can't do modeset and plane update operations
* independently of each other. So the actual modeset happens
* in the plane update callback, and here we just check
* whether we must force the modeset.
*/
if (drm_atomic_crtc_needs_modeset(crtc->state)) {
output->needs_modeset = true;
}
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {

View File

@ -137,7 +137,7 @@ struct virtio_gpu_output {
struct edid *edid;
int cur_x;
int cur_y;
bool enabled;
bool needs_modeset;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)

View File

@ -151,7 +151,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
/*
* virtio_gpu uses drm_gem_shmem_get_sg_table instead of
* drm_gem_shmem_get_pages_sgt because virtio has its own set of
* dma-ops. This is discouraged for other drivers, but should be fine
* since virtio_gpu doesn't support dma-buf import from other devices.
*/
shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;

View File

@ -142,7 +142,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
if (!plane->state->fb || !output->enabled) {
if (!plane->state->fb || !output->crtc.state->active) {
DRM_DEBUG("nofb\n");
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
plane->state->src_w >> 16,
@ -163,7 +163,9 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w != old_state->src_w ||
plane->state->src_h != old_state->src_h ||
plane->state->src_x != old_state->src_x ||
plane->state->src_y != old_state->src_y) {
plane->state->src_y != old_state->src_y ||
output->needs_modeset) {
output->needs_modeset = false;
DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
bo->hw_res_handle,
plane->state->crtc_w, plane->state->crtc_h,

View File

@ -2,6 +2,7 @@ config DRM_ZYNQMP_DPSUB
tristate "ZynqMP DisplayPort Controller Driver"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on COMMON_CLK && DRM && OF
depends on DMADEVICES
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER

View File

@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap)
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
pca_outw(adap, I2C_PCA_IND, 0xA5);
pca_outw(adap, I2C_PCA_IND, 0x5A);
/*
* After a reset we need to re-apply any configuration
* (calculated in pca_init) to get the bus in a working state.
*/
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);
pca_set_con(adap, I2C_PCA_CON_ENSIO);
} else {
adap->reset_chip(adap->data);
pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq);
}
}
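The reset path depends on the values cached by pca_init() below; the cached fields look roughly like this sketch (the exact definition lives in the i2c-algo-pca header):

/* Sketch of the settings re-applied after a chip reset. */
struct pca_i2c_bus_settings {
	int mode;        /* I2C_PCA_IMODE value (PCA9665) */
	int tlow;        /* SCL low time (PCA9665) */
	int thi;         /* SCL high time (PCA9665) */
	int clock_freq;  /* clock divider (PCA9564) */
};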
@ -423,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap)
" Use the nominal frequency.\n", adap->name);
}
pca_reset(pca_data);
clock = pca_clock(pca_data);
printk(KERN_INFO "%s: Clock frequency is %dkHz\n",
adap->name, freqs[clock]);
pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
/* Store settings as these will be needed when the PCA chip is reset */
pca_data->bus_settings.clock_freq = clock;
pca_reset(pca_data);
} else {
int clock;
int mode;
@ -496,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap)
thi = tlow * min_thi / min_tlow;
}
/* Store settings as these will be needed when the PCA chip is reset */
pca_data->bus_settings.mode = mode;
pca_data->bus_settings.tlow = tlow;
pca_data->bus_settings.thi = thi;
pca_reset(pca_data);
printk(KERN_INFO
"%s: Clock frequency is %dHz\n", adap->name, clock * 100);
pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE);
pca_outw(pca_data, I2C_PCA_IND, mode);
pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
pca_outw(pca_data, I2C_PCA_IND, tlow);
pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
pca_outw(pca_data, I2C_PCA_IND, thi);
pca_set_con(pca_data, I2C_PCA_CON_ENSIO);
}
udelay(500); /* 500 us for oscillator to stabilise */

View File

@ -2093,8 +2093,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
/* Adaptive TimeOut: estimated time in usec + 100% margin */
timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite);
/*
* Adaptive TimeOut: estimated time in usec + 100% margin:
* 2: double the timeout for clock stretching case
* 9: bits per transaction (including the ack/nack)
*/
timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
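Plugging numbers into the new formula, with a transfer assumed for illustration: a 16-byte write on a 100 kHz bus yields a timeout of 3240 us:

/* Hypothetical transfer: 100 kHz bus, 16-byte write, no read. */
u32 bus_freq = 100000;	/* Hz */
u16 nwrite = 16, nread = 0;

/* (2 * 9 * 1000000 / 100000) * (2 + 0 + 16) = 180 * 18 = 3240 us */
unsigned long timeout_usec = (2 * 9 * USEC_PER_SEC / bus_freq) *
			     (2 + nread + nwrite);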

View File

@ -189,6 +189,14 @@ struct bmc150_accel_data {
struct mutex mutex;
u8 fifo_mode, watermark;
s16 buffer[8];
/*
* Ensure there is sufficient space and correct alignment for
* the timestamp if enabled
*/
struct {
__le16 channels[3];
s64 ts __aligned(8);
} scan;
u8 bw_bits;
u32 slope_dur;
u32 slope_thres;
@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
* now.
*/
for (i = 0; i < count; i++) {
u16 sample[8];
int j, bit;
j = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength)
memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
sizeof(data->scan.channels[0]));
iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
tstamp);
tstamp += sample_period;
}
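The same fix pattern recurs in the kxsd9, mma7455 and mma8452 hunks below: iio_push_to_buffers_with_timestamp() writes the s64 timestamp into the tail of the scan, so the driver-side buffer must reserve an 8-byte-aligned slot rather than a bare sample array. A generic sketch of the pattern, names illustrative:

/* Generic sketch: channel data plus a naturally aligned timestamp. */
struct {
	__le16 channels[3];   /* 6 bytes of samples, 2 bytes implicit pad */
	s64 ts __aligned(8);  /* filled in by the IIO core */
} scan;

iio_push_to_buffers_with_timestamp(indio_dev, &scan,
				   iio_get_time_ns(indio_dev));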

View File

@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
const struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxsd9_state *st = iio_priv(indio_dev);
/*
* Ensure correct positioning and alignment of timestamp.
* No need to zero initialize as all elements written.
*/
struct {
__be16 chan[4];
s64 ts __aligned(8);
} hw_values;
int ret;
/* 4 * 16bit values AND timestamp */
__be16 hw_values[8];
ret = regmap_bulk_read(st->map,
KXSD9_REG_X,
&hw_values,
8);
hw_values.chan,
sizeof(hw_values.chan));
if (ret) {
dev_err(st->dev,
"error reading data\n");
@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev,
hw_values,
&hw_values,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);

View File

@ -52,6 +52,14 @@
struct mma7455_data {
struct regmap *regmap;
/*
* Used to reorganize data. Will ensure correct alignment of
* the timestamp if present
*/
struct {
__le16 channels[3];
s64 ts __aligned(8);
} scan;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma7455_data *mma7455 = iio_priv(indio_dev);
u8 buf[16]; /* 3 x 16-bit channels + padding + ts */
int ret;
ret = mma7455_drdy(mma7455);
if (ret)
goto done;
ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf,
sizeof(__le16) * 3);
ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL,
mma7455->scan.channels,
sizeof(mma7455->scan.channels));
if (ret)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, buf,
iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
iio_get_time_ns(indio_dev));
done:

View File

@ -110,6 +110,12 @@ struct mma8452_data {
int sleep_val;
struct regulator *vdd_reg;
struct regulator *vddio_reg;
/* Ensure correct alignment of time stamp when present */
struct {
__be16 channels[3];
s64 ts __aligned(8);
} buffer;
};
/**
@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma8452_data *data = iio_priv(indio_dev);
u8 buffer[16]; /* 3 16-bit channels + padding + ts */
int ret;
ret = mma8452_read(data, (__be16 *)buffer);
ret = mma8452_read(data, data->buffer.channels);
if (ret < 0)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
done:

View File

@ -865,6 +865,8 @@ config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
depends on RESET_CONTROLLER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the SARADC found in SoCs from
Rockchip.

Some files were not shown because too many files have changed in this diff.