Merge 6.2-rc7 into tty-next

We need the tty/serial fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2023-02-06 10:48:49 +01:00
commit f6b2ce79b5
490 changed files with 4384 additions and 2203 deletions


@ -130,6 +130,7 @@ Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
Felipe W Damasio <felipewd@terra.com.br>
@ -214,6 +215,7 @@ Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>

CREDITS

@ -1173,6 +1173,10 @@ D: Future Domain TMC-16x0 SCSI driver (author)
D: APM driver (early port)
D: DRM drivers (author of several)
N: Veaceslav Falico
E: vfalico@gmail.com
D: Co-maintainer and co-author of the network bonding driver.
N: János Farkas
E: chexum@shadow.banki.hu
D: romfs, various (mostly networking) fixes
@ -2489,6 +2493,13 @@ D: XF86_Mach8
D: XF86_8514
D: cfdisk (curses based disk partitioning program)
N: Mat Martineau
E: mat@martineau.name
D: MPTCP subsystem co-maintainer 2020-2023
D: Keyctl restricted keyring and Diffie-Hellman UAPI
D: Bluetooth L2CAP ERTM mode and AMP
S: USA
N: John S. Marvin
E: jsm@fc.hp.com
D: PA-RISC port
@ -4172,6 +4183,10 @@ S: B-1206 Jingmao Guojigongyu
S: 16 Baliqiao Nanjie, Beijing 101100
S: People's Republic of China
N: Vlad Yasevich
E: vyasevich@gmail.com
D: SCTP protocol maintainer.
N: Aviad Yehezkel
E: aviadye@nvidia.com
D: Kernel TLS implementation and offload support.


@ -1245,13 +1245,17 @@ PAGE_SIZE multiple when read back.
This is a simple interface to trigger memory reclaim in the
target cgroup.
This file accepts a string which contains the number of bytes to
reclaim.
This file accepts a single key, the number of bytes to reclaim.
No nested keys are currently supported.
Example::
echo "1G" > memory.reclaim
The interface can be later extended with nested keys to
configure the reclaim behavior. For example, specify the
type of memory to reclaim from (anon, file, ..).
Please note that the kernel can over- or under-reclaim from
the target cgroup. If fewer bytes are reclaimed than the
specified amount, -EAGAIN is returned.
@ -1263,13 +1267,6 @@ PAGE_SIZE multiple when read back.
This means that the networking layer will not adapt based on
reclaim induced by memory.reclaim.
This file also allows the user to specify the nodes to reclaim from,
via the 'nodes=' key, for example::
echo "1G nodes=0,1" > memory.reclaim
The above instructs the kernel to reclaim memory from nodes 0,1.
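As a rough illustration of driving the memory.reclaim interface described above from a program rather than the shell, here is a minimal user-space sketch; the cgroup path is hypothetical, and a partial reclaim surfaces as a failed write() with errno set to EAGAIN:

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          /* Hypothetical cgroup; point this at an existing v2 cgroup. */
          const char *path = "/sys/fs/cgroup/mygroup/memory.reclaim";
          const char *request = "1G";
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /* The kernel may reclaim less than requested; that shows up as
           * a write() failure with errno == EAGAIN. */
          if (write(fd, request, strlen(request)) < 0)
                  perror("write");
          close(fd);
          return 0;
  }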
memory.peak
A read-only single value file which exists on non-root
cgroups.


@ -16,7 +16,7 @@ properties:
compatible:
items:
- enum:
- renesas,i2c-r9a09g011 # RZ/V2M
- renesas,r9a09g011-i2c # RZ/V2M
- const: renesas,rzv2m-i2c
reg:
@ -66,7 +66,7 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
i2c0: i2c@a4030000 {
compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c";
compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c";
reg = <0xa4030000 0x80>;
interrupts = <GIC_SPI 232 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;


@ -19,8 +19,8 @@ description: |
additional information and example.
patternProperties:
# 25 LDOs
"^LDO([1-9]|[1][0-9]|2[0-5])$":
# 25 LDOs, without LDO10-12
"^LDO([1-9]|1[3-9]|2[0-5])$":
type: object
$ref: regulator.yaml#
unevaluatedProperties: false
@ -30,6 +30,23 @@ patternProperties:
required:
- regulator-name
"^LDO(1[0-2])$":
type: object
$ref: regulator.yaml#
unevaluatedProperties: false
description:
Properties for single LDO regulator.
properties:
samsung,ext-control-gpios:
maxItems: 1
description:
LDO10, LDO11 and LDO12 can be configured to external control over
GPIO.
required:
- regulator-name
# 5 bucks
"^BUCK[1-5]$":
type: object


@ -83,7 +83,7 @@ properties:
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.
$ref: "/schemas/types.yaml#/definitions/string"
pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
# RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
timebase-frequency: false


@ -40,6 +40,8 @@ properties:
description:
Indicates that the setting of RTC time is allowed by the host CPU.
wakeup-source: true
required:
- compatible
- reg


@ -8,7 +8,7 @@ In order to use the Ethernet bridging functionality, you'll need the
userspace tools.
Documentation for Linux bridging is on:
http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
https://wiki.linuxfoundation.org/networking/bridge
The bridge-utilities are maintained at:
git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git


@ -819,7 +819,7 @@ NAPI
----
This driver supports NAPI (Rx polling mode).
For more information on NAPI, see
https://www.linuxfoundation.org/collaborate/workgroups/networking/napi
https://wiki.linuxfoundation.org/networking/napi
MACVLAN


@ -173,7 +173,9 @@ nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds)
default 3
nf_conntrack_sctp_timeout_established - INTEGER (seconds)
default 432000 (5 days)
default 210
Default is set to (hb_interval * path_max_retrans + rto_max)
nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
default 0.3
@ -190,12 +192,6 @@ nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds)
This timeout is used to setup conntrack entry on secondary paths.
Default is set to hb_interval.
nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
default 210
This timeout is used to setup conntrack entry on secondary paths.
Default is set to (hb_interval * path_max_retrans + rto_max)
nf_conntrack_udp_timeout - INTEGER (seconds)
default 30
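As an aside on the 210-second default shown above for the established state, the value follows directly from the quoted formula once SCTP's usual defaults are plugged in (hb_interval 30 s, path_max_retrans 5, rto_max 60 s; these constants are assumptions, not values read from a running kernel):

  #include <stdio.h>

  int main(void)
  {
          const int hb_interval      = 30; /* seconds, net.sctp.hb_interval */
          const int path_max_retrans = 5;  /* net.sctp.path_max_retrans */
          const int rto_max          = 60; /* seconds, net.sctp.rto_max */

          /* hb_interval * path_max_retrans + rto_max = 210 seconds */
          printf("%d\n", hb_interval * path_max_retrans + rto_max);
          return 0;
  }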


@ -8070,9 +8070,13 @@ considering the state as complete. VMM needs to ensure that the dirty
state is final and avoid missing dirty pages from another ioctl ordered
after the bitmap collection.
NOTE: One example of using the backup bitmap is saving arm64 vgic/its
tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
KVM device "kvm-arm-vgic-its" when dirty ring is enabled.
NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
command on KVM device "kvm-arm-vgic-v3".
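To make example (1) concrete, a hedged sketch of how a VMM might trigger the ITS table save on arm64 through the device-attribute ioctl; error handling is omitted and its_fd is assumed to be the fd returned by KVM_CREATE_DEVICE for "kvm-arm-vgic-its":

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  /* Sketch only; assumes arm64 UAPI headers and an open ITS device fd. */
  static int save_its_tables(int its_fd)
  {
          struct kvm_device_attr attr = {
                  .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                  .attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
          };

          /* Table data written to guest memory is tracked through the
           * backup bitmap when the dirty ring is enabled. */
          return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
  }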
8.30 KVM_CAP_XEN_HVM
--------------------


@ -95,3 +95,39 @@ by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
not enable SME, then Linux will not be able to activate memory encryption, even
if configured to do so by default or the mem_encrypt=on command line parameter
is specified.
Secure Nested Paging (SNP)
==========================
SEV-SNP introduces new features (SEV_FEATURES[1:63]) which can be enabled
by the hypervisor for security enhancements. Some of these features need
guest side implementation to function correctly. The below table lists the
expected guest behavior with various possible scenarios of guest/hypervisor
SNP feature support.
+-----------------+---------------+---------------+------------------+
| Feature Enabled | Guest needs | Guest has | Guest boot |
| by the HV | implementation| implementation| behaviour |
+=================+===============+===============+==================+
| No | No | No | Boot |
| | | | |
+-----------------+---------------+---------------+------------------+
| No | Yes | No | Boot |
| | | | |
+-----------------+---------------+---------------+------------------+
| No | Yes | Yes | Boot |
| | | | |
+-----------------+---------------+---------------+------------------+
| Yes | No | No | Boot with |
| | | | feature enabled |
+-----------------+---------------+---------------+------------------+
| Yes | Yes | No | Graceful boot |
| | | | failure |
+-----------------+---------------+---------------+------------------+
| Yes | Yes | Yes | Boot with |
| | | | feature enabled |
+-----------------+---------------+---------------+------------------+
More details in AMD64 APM[1] Vol 2: 15.34.10 SEV_STATUS MSR
[1] https://www.amd.com/system/files/TechDocs/40332.pdf
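To make the table's policy concrete, a small self-contained sketch of the mask check it implies; the feature bits and both masks below are illustrative placeholders rather than the kernel's actual definitions:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative feature bits only. */
  #define FEAT_RESTRICTED_INJ (1ULL << 5)
  #define FEAT_DEBUG_SWAP     (1ULL << 7)

  /* Features needing guest support vs. features this guest implements. */
  #define FEATURES_IMPL_REQ   (FEAT_RESTRICTED_INJ | FEAT_DEBUG_SWAP)
  #define FEATURES_PRESENT    (FEAT_DEBUG_SWAP)

  int main(void)
  {
          uint64_t sev_status = FEAT_RESTRICTED_INJ; /* pretend the HV enabled it */
          uint64_t unsupported = sev_status & FEATURES_IMPL_REQ & ~FEATURES_PRESENT;

          if (unsupported)
                  printf("graceful boot failure, mask 0x%llx\n",
                         (unsigned long long)unsupported);
          else
                  printf("boot with the enabled features\n");
          return 0;
  }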


@ -1097,7 +1097,6 @@ S: Maintained
F: drivers/dma/ptdma/
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
M: Tom Lendacky <thomas.lendacky@amd.com>
S: Supported
@ -2212,6 +2211,9 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
X: drivers/media/i2c/
F: arch/arm64/boot/dts/freescale/
X: arch/arm64/boot/dts/freescale/fsl-*
X: arch/arm64/boot/dts/freescale/qoriq-*
N: imx
N: mxs
@ -2450,11 +2452,14 @@ F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
R: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: https://mtk.wiki.kernel.org/
C: irc://chat.freenode.net/linux-mediatek
C: irc://irc.libera.chat/linux-mediatek
F: arch/arm/boot/dts/mt2*
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt7*
F: arch/arm/boot/dts/mt8*
@ -2462,7 +2467,7 @@ F: arch/arm/mach-mediatek/
F: arch/arm64/boot/dts/mediatek/
F: drivers/soc/mediatek/
N: mtk
N: mt[678]
N: mt[2678]
K: mediatek
ARM/Mediatek USB3 PHY DRIVER
@ -3766,7 +3771,6 @@ F: net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <j.vosburgh@gmail.com>
M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
@ -7615,7 +7619,6 @@ S: Maintained
F: drivers/firmware/efi/test/
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
M: Jeremy Kerr <jk@ozlabs.org>
M: Ard Biesheuvel <ardb@kernel.org>
L: linux-efi@vger.kernel.org
@ -7894,7 +7897,11 @@ F: include/linux/extcon/
EXTRA BOOT CONFIG
M: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-trace-kernel@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-trace-kernel/list/
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git
F: Documentation/admin-guide/bootconfig.rst
F: fs/proc/bootconfig.c
F: include/linux/bootconfig.h
@ -8467,16 +8474,16 @@ F: fs/fscache/
F: include/linux/fscache*.h
FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
M: Eric Biggers <ebiggers@kernel.org>
L: linux-fscrypt@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
T: git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git
F: Documentation/filesystems/fscrypt.rst
F: fs/crypto/
F: include/linux/fscrypt*.h
F: include/linux/fscrypt.h
F: include/uapi/linux/fscrypt.h
FSI SUBSYSTEM
@ -8519,10 +8526,10 @@ F: include/linux/fsnotify*.h
FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
L: linux-fscrypt@vger.kernel.org
L: fsverity@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
Q: https://patchwork.kernel.org/project/fsverity/list/
T: git https://git.kernel.org/pub/scm/fs/fsverity/linux.git
F: Documentation/filesystems/fsverity.rst
F: fs/verity/
F: include/linux/fsverity.h
@ -8570,6 +8577,7 @@ F: kernel/trace/fgraph.c
F: arch/*/*/*/*ftrace*
F: arch/*/*/*ftrace*
F: include/*/ftrace.h
F: samples/ftrace
FUNGIBLE ETHERNET DRIVERS
M: Dimitris Michailidis <dmichail@fungible.com>
@ -14600,7 +14608,6 @@ F: tools/testing/selftests/net/ipsec.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
M: David Ahern <dsahern@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
@ -14633,7 +14640,6 @@ F: net/netfilter/xt_SECMARK.c
F: net/netlabel/
NETWORKING [MPTCP]
M: Mat Martineau <mathew.j.martineau@linux.intel.com>
M: Matthieu Baerts <matthieu.baerts@tessares.net>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
@ -15658,7 +15664,7 @@ OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
M: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
M: Stafford Horne <shorne@gmail.com>
L: openrisc@lists.librecores.org
L: linux-openrisc@vger.kernel.org
S: Maintained
W: http://openrisc.io
T: git https://github.com/openrisc/linux.git
@ -17962,6 +17968,7 @@ M: Albert Ou <aou@eecs.berkeley.edu>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
C: irc://irc.libera.chat/riscv
P: Documentation/riscv/patch-acceptance.rst
T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
F: arch/riscv/
@ -18683,9 +18690,9 @@ F: drivers/target/
F: include/target/
SCTP PROTOCOL
M: Vlad Yasevich <vyasevich@gmail.com>
M: Neil Horman <nhorman@tuxdriver.com>
M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
M: Xin Long <lucien.xin@gmail.com>
L: linux-sctp@vger.kernel.org
S: Maintained
W: http://lksctp.sourceforge.net
@ -21723,6 +21730,7 @@ F: include/uapi/linux/uvcvideo.h
USB WEBCAM GADGET
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
M: Daniel Scally <dan.scally@ideasonboard.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/gadget/function/*uvc*


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -132,7 +132,7 @@ AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
CFLAGS_ISA :=-Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb -D__thumb2__=2
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
CFLAGS_ISA +=-mthumb
else
CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)


@ -751,7 +751,7 @@
};
pca9849@75 {
compatible = "nxp,pca849";
compatible = "nxp,pca9849";
reg = <0x75>;
#address-cells = <1>;
#size-cells = <0>;


@ -198,6 +198,7 @@
&usbotg2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg2>;
over-current-active-low;
dr_mode = "host";
status = "okay";
};
@ -374,7 +375,7 @@
pinctrl_usbotg2: usbotg2grp {
fsl,pins = <
MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x04
MX7D_PAD_UART3_RTS_B__USB_OTG2_OC 0x5c
>;
};


@ -480,6 +480,7 @@
reg = <0xc8000000 0x1000>, <0xc0000000 0x4000000>;
reg-names = "control", "memory";
clocks = <&clk 0>;
nuvoton,shm = <&shm>;
status = "disabled";
};


@ -53,7 +53,12 @@ $(obj)/%-core.S: $(src)/%-armv4.pl
clean-files += poly1305-core.S sha256-core.S sha512-core.S
aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
AFLAGS_sha256-core.o += $(aflags-thumb2-y)
AFLAGS_sha512-core.o += $(aflags-thumb2-y)
# massage the perlasm code a bit so we only get the NEON routine if we need it
poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
AFLAGS_poly1305-core.o += $(poly1305-aflags-y)
AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)


@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc)
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);


@ -6,6 +6,7 @@
* VM_EXEC
*/
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#ifdef CONFIG_CPU_V7M


@ -164,7 +164,7 @@
sc_pwrkey: keys {
compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
linux,keycode = <KEY_POWER>;
linux,keycodes = <KEY_POWER>;
wakeup-source;
};


@ -88,6 +88,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_watchdog_gpio>;
compatible = "linux,wdt-gpio";
always-running;
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
hw_algo = "level";
/* Reset triggers in 2..3 seconds */


@ -602,7 +602,7 @@
#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0


@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};


@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};


@ -222,7 +222,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>;
cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
bluetooth {


@ -733,7 +733,6 @@
dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};
@ -749,7 +748,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};
@ -758,7 +756,6 @@
pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>;
cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};


@ -664,7 +664,6 @@
pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
rts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};
@ -681,7 +680,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
bluetooth {
@ -699,7 +697,6 @@
dtr-gpios = <&gpio4 3 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};


@ -581,7 +581,6 @@
dtr-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
};


@ -98,6 +98,7 @@
off-on-delay = <500000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_eth>;
regulator-always-on;
regulator-boot-on;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <3300000>;


@ -643,7 +643,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
bluetooth {


@ -623,7 +623,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
uart-has-rtscts;
status = "okay";
bluetooth {


@ -48,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
})
extern spinlock_t efi_rt_lock;
extern u64 *efi_rt_stack_top;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
/*
* efi_rt_stack_top[-1] contains the value the stack pointer had before
* switching to the EFI runtime stack.
*/
#define current_in_efi() \
(!preemptible() && efi_rt_stack_top != NULL && \
on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/*


@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
#define stackinfo_get_sdei_critical() stackinfo_get_unknown()
#endif
#ifdef CONFIG_EFI
extern u64 *efi_rt_stack_top;
static inline struct stack_info stackinfo_get_efi(void)
{
unsigned long high = (u64)efi_rt_stack_top;
unsigned long low = high - THREAD_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
#endif
#endif /* __ASM_STACKTRACE_H */


@ -46,7 +46,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x4, x6
blr x8
mov x16, sp
mov sp, x29
str xzr, [x16, #8] // clear recorded task SP value
ldp x1, x2, [sp, #16]
cmp x2, x18
ldp x29, x30, [sp], #112
@ -71,6 +74,9 @@ SYM_FUNC_END(__efi_rt_asm_wrapper)
SYM_CODE_START(__efi_rt_asm_recover)
mov sp, x30
ldr_l x16, efi_rt_stack_top // clear recorded task SP value
str xzr, [x16, #-8]
ldp x19, x20, [sp, #32]
ldp x21, x22, [sp, #48]
ldp x23, x24, [sp, #64]


@ -11,6 +11,7 @@
#include <linux/init.h>
#include <asm/efi.h>
#include <asm/stacktrace.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
@ -154,7 +155,7 @@ asmlinkage efi_status_t __efi_rt_asm_recover(void);
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
/* Check whether the exception occurred while running the firmware */
if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
return false;
pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);


@ -5,6 +5,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
@ -12,6 +13,7 @@
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
: stackinfo_get_unknown(); \
})
#define STACKINFO_EFI \
({ \
((task == current) && current_in_efi()) \
? stackinfo_get_efi() \
: stackinfo_get_unknown(); \
})
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
@ -199,6 +208,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
STACKINFO_SDEI(normal),
STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
STACKINFO_EFI,
#endif
};
struct unwind_state state = {


@ -1079,7 +1079,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
/* uaccess failed, don't leave stale tags */
if (num_tags != MTE_GRANULES_PER_PAGE)
mte_clear_page_tags(page);
mte_clear_page_tags(maddr);
set_page_mte_tagged(page);
kvm_release_pfn_dirty(pfn);


@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@ -2743,7 +2743,6 @@ static int vgic_its_has_attr(struct kvm_device *dev,
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
@ -2763,9 +2762,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
vgic_its_reset(kvm, its);
break;
case KVM_DEV_ARM_ITS_SAVE_TABLES:
dist->save_its_tables_in_progress = true;
ret = abi->save_tables(its);
dist->save_its_tables_in_progress = false;
break;
case KVM_DEV_ARM_ITS_RESTORE_TABLES:
ret = abi->restore_tables(its);
@ -2792,7 +2789,7 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
return dist->save_its_tables_in_progress;
return dist->table_write_in_progress;
}
static int vgic_its_set_attr(struct kvm_device *dev,


@ -339,7 +339,7 @@ retry:
if (status) {
/* clear consumed data */
val &= ~(1 << bit_nr);
ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@ -350,26 +350,23 @@ retry:
* The deactivation of the doorbell interrupt will trigger the
* unmapping of the associated vPE.
*/
static void unmap_all_vpes(struct vgic_dist *dist)
static void unmap_all_vpes(struct kvm *kvm)
{
struct irq_desc *desc;
struct vgic_dist *dist = &kvm->arch.vgic;
int i;
for (i = 0; i < dist->its_vm.nr_vpes; i++) {
desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
}
for (i = 0; i < dist->its_vm.nr_vpes; i++)
free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}
static void map_all_vpes(struct vgic_dist *dist)
static void map_all_vpes(struct kvm *kvm)
{
struct irq_desc *desc;
struct vgic_dist *dist = &kvm->arch.vgic;
int i;
for (i = 0; i < dist->its_vm.nr_vpes; i++) {
desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
}
for (i = 0; i < dist->its_vm.nr_vpes; i++)
WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
dist->its_vm.vpes[i]->irq));
}
/**
@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
* and enabling of the doorbells have already been done.
*/
if (kvm_vgic_global_state.has_gicv4_1) {
unmap_all_vpes(dist);
unmap_all_vpes(kvm);
vlpi_avail = true;
}
@ -437,14 +434,14 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
else
val &= ~(1 << bit_nr);
ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
goto out;
}
out:
if (vlpi_avail)
map_all_vpes(dist);
map_all_vpes(kvm);
return ret;
}


@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
*val = !!(*ptr & mask);
}
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
irq_flags &= ~IRQ_NOAUTOEN;
irq_set_status_flags(irq, irq_flags);
ret = request_irq(irq, vgic_v4_doorbell_handler,
0, "vcpu", vcpu);
ret = vgic_v4_request_vpe_irq(vcpu, irq);
if (ret) {
kvm_err("failed to allocate vcpu IRQ%d\n", irq);
/*


@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
struct vgic_dist *dist = &kvm->arch.vgic;
int ret;
dist->table_write_in_progress = true;
ret = kvm_write_guest_lock(kvm, gpa, data, len);
dist->table_write_in_progress = false;
return ret;
}
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
@ -331,5 +345,6 @@ int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
#endif


@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
asmlinkage long
ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
{
struct timespec64 rtn_tp;
s64 tick_ns;
/*
* ia64's clock_gettime() syscall is implemented as a vdso call
* fsys_clock_gettime(). Currently it handles only
@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
switch (which_clock) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
rtn_tp = ns_to_timespec64(tick_ns);
return put_timespec64(&rtn_tp, tp);
}


@ -1303,7 +1303,7 @@ static char iodc_dbuf[4096] __page_aligned_bss;
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
unsigned int i;
unsigned int i, found = 0;
unsigned long flags;
count = min_t(unsigned int, count, sizeof(iodc_dbuf));
@ -1315,6 +1315,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
found = 1;
goto print;
default:
iodc_dbuf[i] = str[i];
@ -1330,7 +1331,7 @@ print:
__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
return i;
return i - found;
}
#if !defined(BOOTLOADER)


@ -126,6 +126,12 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long tmp;
long ret = -EIO;
unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
#ifdef CONFIG_64BIT
if (is_compat_task())
user_regs_struct_size /= 2;
#endif
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
@ -166,7 +172,7 @@ long arch_ptrace(struct task_struct *child, long request,
addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
data |= 3; /* ensure userspace privilege */
data |= PRIV_USER; /* ensure userspace privilege */
}
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
@ -181,14 +187,14 @@ long arch_ptrace(struct task_struct *child, long request,
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
0, user_regs_struct_size,
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
0, user_regs_struct_size,
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
@ -285,7 +291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
data |= 3; /* ensure userspace privilege */
data |= PRIV_USER; /* ensure userspace privilege */
}
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
@ -302,6 +308,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
}
break;
case PTRACE_GETREGS:
case PTRACE_SETREGS:
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
return arch_ptrace(child, request, addr, data);
default:
ret = compat_ptrace_request(child, request, addr, data);
@ -484,7 +495,7 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
case RI(iaoq[0]):
case RI(iaoq[1]):
/* set 2 lowest bits to ensure userspace privilege: */
regs->iaoq[num - RI(iaoq[0])] = val | 3;
regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
return;
case RI(sar): regs->sar = val;
return;


@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
{
if (radix_enabled())
radix__tlb_flush(tlb);
return hash__tlb_flush(tlb);
}
#ifdef CONFIG_SMP


@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
return flags;
}
static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
unsigned long flags = irq_soft_mask_return();
irq_soft_mask_set(flags & ~mask);
return flags;
}
static inline unsigned long arch_local_save_flags(void)
{
return irq_soft_mask_return();
@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)
static inline unsigned long arch_local_irq_save(void)
{
return irq_soft_mask_set_return(IRQS_DISABLED);
return irq_soft_mask_or_return(IRQS_DISABLED);
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
static inline bool should_hard_irq_enable(void)
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(mfmsr() & MSR_EE);
}
@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
*
* TODO: Add test for 64e
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
return false;
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
if (!power_pmu_wants_prompt_pmi())
return false;
/*
* If PMIs are disabled then IRQs should be disabled as well,
* so we shouldn't see this condition, check for it just in
* case because we are about to enable PMIs.
*/
if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
return false;
}
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;
@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
/*
* Do the hard enabling, only call this if should_hard_irq_enable is true.
* This allows PMI interrupts to profile irq handlers.
*/
static inline void do_hard_irq_enable(void)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
WARN_ON(mfmsr() & MSR_EE);
}
/*
* This allows PMI interrupts (and watchdog soft-NMIs) through.
* There is no other reason to enable this way.
* Asynch interrupts come in with IRQS_ALL_DISABLED,
* PACA_IRQ_HARD_DIS, and MSR[EE]=0.
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
static __always_inline bool should_hard_irq_enable(void)
static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
return false;
}


@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
if (should_hard_irq_enable())
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());


@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
* SPE unavailable trap from kernel - print a message, but let
* the task use SPE in the kernel until it returns to user mode.
*/
KernelSPE:
SYM_FUNC_START_LOCAL(KernelSPE)
lwz r3,_MSR(r1)
oris r3,r3,MSR_SPE@h
stw r3,_MSR(r1) /* enable use of SPE after return */
@ -881,6 +881,7 @@ KernelSPE:
#endif
.align 4,0
SYM_FUNC_END(KernelSPE)
#endif /* CONFIG_SPE */
/*


@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
if (should_hard_irq_enable())
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
/* And finally process it */


@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
}
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable()) {
if (should_hard_irq_enable(regs)) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.


@ -989,10 +989,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
* linux,drconf-usable-memory properties. Get an approximate on the
* number of usable memory entries and use for FDT size estimation.
*/
usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
(2 * (resource_size(&crashk_res) / drmem_lmb_size())));
extra_size = (unsigned int)(usm_entries * sizeof(u64));
if (drmem_lmb_size()) {
usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
(2 * (resource_size(&crashk_res) / drmem_lmb_size())));
extra_size = (unsigned int)(usm_entries * sizeof(u64));
} else {
extra_size = 0;
}
/*
* Get the number of CPU nodes in the current DT. This allows to


@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
ulong r1, ip, msr, lr;
ulong r1, msr, lr;
asm("mr %0, 1" : "=r"(r1));
asm("mflr %0" : "=r"(lr));
asm("mfmsr %0" : "=r"(msr));
asm("bl 1f; 1: mflr %0" : "=r"(ip));
memset(regs, 0, sizeof(*regs));
regs->gpr[1] = r1;
regs->nip = ip;
regs->nip = _THIS_IP_;
regs->msr = msr;
regs->link = lr;
}


@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
end = (unsigned long)__end_rodata;
radix__change_memory_range(start, end, _PAGE_WRITE);
for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
end = start + PAGE_SIZE;
if (overlaps_interrupt_vector_text(start, end))
radix__change_memory_range(start, end, _PAGE_WRITE);
else
break;
}
}
void radix__mark_initmem_nx(void)
@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
unsigned long stext_phys;
stext_phys = __pa_symbol(_stext);
// Relocatable kernel running at non-zero real address
if (stext_phys != 0) {
// The end of interrupts code at zero is a rodata boundary
unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
if (addr < end_intr)
return end_intr;
// Start of relocated kernel text is a rodata boundary
if (addr < stext_phys)
return stext_phys;
}
if (addr < __pa_symbol(__srwx_boundary))
return __pa_symbol(__srwx_boundary);
#endif


@ -22,7 +22,7 @@
* Used to avoid races in counting the nest-pmu units during hotplug
* register and unregister
*/
static DEFINE_SPINLOCK(nest_init_lock);
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
spin_lock(&nest_init_lock);
mutex_lock(&nest_init_lock);
if (nest_pmus == 1) {
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
kfree(nest_imc_refc);
@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
if (nest_pmus > 0)
nest_pmus--;
spin_unlock(&nest_init_lock);
mutex_unlock(&nest_init_lock);
}
/* Free core_imc memory */
@ -1796,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
* rest. To handle the cpuhotplug callback unregister, we track
* the number of nest pmus in "nest_pmus".
*/
spin_lock(&nest_init_lock);
mutex_lock(&nest_init_lock);
if (nest_pmus == 0) {
ret = init_nest_pmu_ref();
if (ret) {
spin_unlock(&nest_init_lock);
mutex_unlock(&nest_init_lock);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
goto err_free_mem;
@ -1808,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
/* Register for cpu hotplug notification. */
ret = nest_pmu_cpumask_init();
if (ret) {
spin_unlock(&nest_init_lock);
mutex_unlock(&nest_init_lock);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
per_nest_pmu_arr = NULL;
@ -1816,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
}
}
nest_pmus++;
spin_unlock(&nest_init_lock);
mutex_unlock(&nest_init_lock);
break;
case IMC_DOMAIN_CORE:
ret = core_imc_pmu_cpumask_init();


@ -80,6 +80,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
KBUILD_CFLAGS += -fno-omit-frame-pointer
endif
# Avoid generating .eh_frame sections.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)


@ -46,7 +46,7 @@
.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
new_c_2, vendor_id_2, errata_id_2, enable_2
ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1
ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1
ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
.endm


@ -70,7 +70,6 @@ static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX);
*/
enum riscv_isa_ext_key {
RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */
RISCV_ISA_EXT_KEY_ZIHINTPAUSE,
RISCV_ISA_EXT_KEY_SVINVAL,
RISCV_ISA_EXT_KEY_MAX,
};
@ -91,8 +90,6 @@ static __always_inline int riscv_isa_ext2key(int num)
return RISCV_ISA_EXT_KEY_FPU;
case RISCV_ISA_EXT_d:
return RISCV_ISA_EXT_KEY_FPU;
case RISCV_ISA_EXT_ZIHINTPAUSE:
return RISCV_ISA_EXT_KEY_ZIHINTPAUSE;
case RISCV_ISA_EXT_SVINVAL:
return RISCV_ISA_EXT_KEY_SVINVAL;
default:


@ -4,30 +4,26 @@
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
#include <asm/barrier.h>
#include <asm/hwcap.h>
static inline void cpu_relax(void)
{
if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) {
#ifdef __riscv_muldiv
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
} else {
/*
* Reduce instruction retirement.
* This assumes the PC changes.
*/
#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
__asm__ __volatile__ ("pause");
#ifdef __riscv_zihintpause
/*
* Reduce instruction retirement.
* This assumes the PC changes.
*/
__asm__ __volatile__ ("pause");
#else
/* Encoding of the pause instruction */
__asm__ __volatile__ (".4byte 0x100000F");
/* Encoding of the pause instruction */
__asm__ __volatile__ (".4byte 0x100000F");
#endif
}
barrier();
}


@ -326,7 +326,7 @@ clear_bss_done:
call soc_early_init
tail start_kernel
#if CONFIG_RISCV_BOOT_SPINWAIT
#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park


@ -48,6 +48,21 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
post_kprobe_handler(p, kcb, regs);
}
static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
unsigned long tmp = (unsigned long)p->addr - p->offset;
unsigned long addr = (unsigned long)p->addr;
while (tmp <= addr) {
if (tmp == addr)
return true;
tmp += GET_INSN_LENGTH(*(u16 *)tmp);
}
return false;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
@ -55,6 +70,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (probe_addr & 0x1)
return -EILSEQ;
if (!arch_check_kprobe(p))
return -EILSEQ;
/* copy instruction */
p->opcode = *p->addr;


@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg
u32 rd_index = (opcode >> 7) & 0x1f;
u32 rs1_index = (opcode >> 15) & 0x1f;
ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
if (!ret)
return ret;
ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
if (!ret)
return ret;


@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
init_cpu_topology();
}
void __init smp_prepare_cpus(unsigned int max_cpus)
@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int ret;
unsigned int curr_cpuid;
init_cpu_topology();
curr_cpuid = smp_processor_id();
store_cpu_topology(curr_cpuid);
numa_store_cpu_info(curr_cpuid);


@ -80,6 +80,6 @@ void *decompress_kernel(void)
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, 0, NULL, error);
NULL, NULL, output, vmlinux.image_size, NULL, error);
return output;
}


@ -4,6 +4,7 @@
* Written by Niibe Yutaka and Paul Mundt
*/
OUTPUT_ARCH(sh)
#define RUNTIME_DISCARD_EXIT
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>


@ -14,13 +14,13 @@ endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
endif
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
ifdef CONFIG_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern


@ -180,6 +180,12 @@ void initialize_identity_maps(void *rmode)
/* Load the new page-table. */
write_cr3(top_level_pgt);
/*
* Now that the required page table mappings are established and a
* GHCB can be used, check for SNP guest/HV feature compatibility.
*/
snp_check_features();
}
static pte_t *split_large_pmd(struct x86_mapping_info *info,


@ -126,6 +126,7 @@ static inline void console_init(void)
#ifdef CONFIG_AMD_MEM_ENCRYPT
void sev_enable(struct boot_params *bp);
void snp_check_features(void);
void sev_es_shutdown_ghcb(void);
extern bool sev_es_check_ghcb_fault(unsigned long address);
void snp_set_page_private(unsigned long paddr);
@ -143,6 +144,7 @@ static inline void sev_enable(struct boot_params *bp)
if (bp)
bp->cc_blob_address = 0;
}
static inline void snp_check_features(void) { }
static inline void sev_es_shutdown_ghcb(void) { }
static inline bool sev_es_check_ghcb_fault(unsigned long address)
{


@ -208,6 +208,23 @@ void sev_es_shutdown_ghcb(void)
error("Can't unmap GHCB page");
}
static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
unsigned int reason, u64 exit_info_2)
{
u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
sev_es_wr_ghcb_msr(__pa(ghcb));
VMGEXIT();
while (true)
asm volatile("hlt\n" : : : "memory");
}
bool sev_es_check_ghcb_fault(unsigned long address)
{
/* Check whether the fault was on the GHCB page */
@ -270,6 +287,59 @@ static void enforce_vmpl0(void)
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}
/*
* SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
* guest side implementation for proper functioning of the guest. If any
* of these features are enabled in the hypervisor but are lacking guest
* side implementation, the behavior of the guest will be undefined. The
* guest could fail in a non-obvious way, making it difficult to debug.
*
* As the behavior of reserved feature bits is unknown, to be on the
* safe side add them to the required features mask.
*/
#define SNP_FEATURES_IMPL_REQ (MSR_AMD64_SNP_VTOM | \
MSR_AMD64_SNP_REFLECT_VC | \
MSR_AMD64_SNP_RESTRICTED_INJ | \
MSR_AMD64_SNP_ALT_INJ | \
MSR_AMD64_SNP_DEBUG_SWAP | \
MSR_AMD64_SNP_VMPL_SSS | \
MSR_AMD64_SNP_SECURE_TSC | \
MSR_AMD64_SNP_VMGEXIT_PARAM | \
MSR_AMD64_SNP_VMSA_REG_PROTECTION | \
MSR_AMD64_SNP_RESERVED_BIT13 | \
MSR_AMD64_SNP_RESERVED_BIT15 | \
MSR_AMD64_SNP_RESERVED_MASK)
/*
* SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
* by the guest kernel. As and when a new feature is implemented in the
* guest kernel, a corresponding bit should be added to the mask.
*/
#define SNP_FEATURES_PRESENT (0)
void snp_check_features(void)
{
u64 unsupported;
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
* Terminate the boot if hypervisor has enabled any feature lacking
* guest side implementation. Pass on the unsupported features mask through
* EXIT_INFO_2 of the GHCB protocol so that those features can be reported
* as part of the guest boot failure.
*/
unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
if (unsupported) {
if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
GHCB_SNP_UNSUPPORTED, unsupported);
}
}
void sev_enable(struct boot_params *bp)
{
unsigned int eax, ebx, ecx, edx;


@ -6339,6 +6339,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_EMERALDRAPIDS_X:
pmem = true;
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));


@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),


@ -14,6 +14,7 @@
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
@ -63,6 +64,13 @@ extern int (*acpi_suspend_lowlevel)(void);
/* Physical address to resume after wakeup */
unsigned long acpi_get_wakeup_address(void);
static inline bool acpi_skip_set_wakeup_address(void)
{
return cpu_feature_enabled(X86_FEATURE_XENPV);
}
#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
/*
* Check if the CPU can handle C2 and deeper
*/


@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno)
asm("mov %%db6, %0" :"=r" (val));
break;
case 7:
asm("mov %%db7, %0" :"=r" (val));
/*
* Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
* with other code.
*
* This is needed because a DR7 access can cause a #VC exception
* when running under SEV-ES. Taking a #VC exception is not a
* safe thing to do just anywhere in the entry code and
* re-ordering might place the access into an unsafe location.
*
* This happened in the NMI handler, where the DR7 read was
* re-ordered to happen before the call to sev_es_ist_enter(),
* causing stack recursion.
*/
asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
break;
default:
BUG();
@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value)
asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
asm("mov %0, %%db7" ::"r" (value));
/*
* Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
* with other code.
*
* While it didn't happen with a DR7 write (see the DR7 read
* comment above which explains where it happened), add the
* __FORCE_ORDER here too to avoid similar problems in the
* future.
*/
asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
break;
default:
BUG();


@ -566,6 +566,26 @@
#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
/* SNP feature bits enabled by the hypervisor */
#define MSR_AMD64_SNP_VTOM BIT_ULL(3)
#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4)
#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5)
#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6)
#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7)
#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8)
#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9)
#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10)
#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11)
#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12)
#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14)
#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16)
#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17)
/* SNP feature bits reserved for future use. */
#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
/* AMD Collaborative Processor Performance Control MSRs */


@ -116,6 +116,12 @@
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
/* SW_EXITINFO1[3:0] */ \
(((((u64)reason_set) & 0xf)) | \
/* SW_EXITINFO1[11:4] */ \
((((u64)reason_code) & 0xff) << 4))
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
/* Exit code reserved for hypervisor/software use */


@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
static void disable_freq_invariance_workfn(struct work_struct *work)
{
int cpu;
static_branch_disable(&arch_scale_freq_key);
/*
* Set arch_freq_scale to a default value on all cpus
* This negates the effect of scaling
*/
for_each_possible_cpu(cpu)
per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
}
static DECLARE_WORK(disable_freq_invariance_work,


@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
disable_irq_nosync(irq);
io_apic_irqs &= ~(1<<irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
irq_set_status_flags(irq, IRQ_LEVEL);
enable_irq(irq);
lapic_assign_legacy_vector(irq, true);
}


@ -65,8 +65,10 @@ void __init init_ISA_irqs(void)
legacy_pic->init(0);
for (i = 0; i < nr_legacy_irqs(); i++)
for (i = 0; i < nr_legacy_irqs(); i++) {
irq_set_chip_and_handler(i, chip, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
void __init init_IRQ(void)


@ -3440,18 +3440,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
if (var->unusable || !var->present)
ar = 1 << 16;
else {
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->present & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
}
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->present & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
ar |= (var->unusable || !var->present) << 16;
return ar;
}


@ -392,6 +392,7 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_ASSOCIATED) {
for (i = 0; i < msidesc->nvec_used; i++)
xen_destroy_irq(msidesc->irq + i);
msidesc->irq = 0;
}
}
@ -433,6 +434,7 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = {
};
static struct msi_domain_info xen_pci_msi_domain_info = {
.flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
.ops = &xen_pci_msi_domain_ops,
};


@ -769,8 +769,8 @@ static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
* request from the old cgroup.
*/
bfq_put_cooperator(sync_bfqq);
bfq_release_process_ref(bfqd, sync_bfqq);
bic_set_bfqq(bic, NULL, true);
bfq_release_process_ref(bfqd, sync_bfqq);
}
}
}


@ -5425,9 +5425,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
bfq_release_process_ref(bfqd, bfqq);
struct bfq_queue *old_bfqq = bfqq;
bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
bic_set_bfqq(bic, bfqq, false);
bfq_release_process_ref(bfqd, old_bfqq);
}
bfqq = bic_to_bfqq(bic, true);

View File

@ -2001,6 +2001,10 @@ void blk_cgroup_bio_start(struct bio *bio)
struct blkg_iostat_set *bis;
unsigned long flags;
/* Root-level stats are sourced from system-wide IO stats */
if (!cgroup_parent(blkcg->css.cgroup))
return;
cpu = get_cpu();
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
flags = u64_stats_update_begin_irqsave(&bis->sync);

View File

@ -4069,8 +4069,9 @@ EXPORT_SYMBOL(blk_mq_init_queue);
* blk_mq_destroy_queue - shutdown a request queue
* @q: request queue to shutdown
*
* This shuts down a request queue allocated by blk_mq_init_queue() and drops
* the initial reference. All future requests will failed with -ENODEV.
* This shuts down a request queue allocated by blk_mq_init_queue(). All future
* requests will be failed with -ENODEV. The caller is responsible for dropping
* the reference from blk_mq_init_queue() by calling blk_put_queue().
*
* Context: can sleep
*/
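An illustrative caller-side sketch (not from this diff) of the contract spelled out in the updated comment; the wrapper name is a placeholder and the queue is assumed to have come from blk_mq_init_queue():

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Teardown for a queue obtained earlier from blk_mq_init_queue(). */
static void my_driver_remove_queue(struct request_queue *q)
{
	blk_mq_destroy_queue(q);	/* shut down; new requests fail with -ENODEV */
	blk_put_queue(q);		/* drop the reference blk_mq_init_queue() returned */
}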

View File

@ -23,8 +23,8 @@ $(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
targets += blacklist_hash_list
quiet_cmd_extract_certs = CERT $@
cmd_extract_certs = $(obj)/extract-cert $(extract-cert-in) $@
extract-cert-in = $(or $(filter-out $(obj)/extract-cert, $(real-prereqs)),"")
cmd_extract_certs = $(obj)/extract-cert "$(extract-cert-in)" $@
extract-cert-in = $(filter-out $(obj)/extract-cert, $(real-prereqs))
$(obj)/system_certificates.o: $(obj)/x509_certificate_list

View File

@ -60,13 +60,17 @@ static struct notifier_block tts_notifier = {
.priority = 0,
};
#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
unsigned long acpi_wakeup_address;
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
acpi_wakeup_address = acpi_get_wakeup_address();
if (!acpi_wakeup_address)
return -EFAULT;
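A hedged sketch (not from this diff) of how an architecture could override the new acpi_skip_set_wakeup_address() fallback above from its own <asm/acpi.h>; the config symbol is made up for illustration:

#include <linux/kconfig.h>
#include <linux/types.h>

#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
static inline bool acpi_skip_set_wakeup_address(void)
{
	/* hypothetical option for platforms that never program a waking vector */
	return IS_ENABLED(CONFIG_MY_HW_REDUCED_WAKEUP);
}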

View File

@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void)
}
#endif
static bool apple_gmux_backlight_present(void)
{
struct acpi_device *adev;
struct device *dev;
adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
if (!adev)
return false;
dev = acpi_get_first_physical_node(adev);
if (!dev)
return false;
/*
* drivers/platform/x86/apple-gmux.c only supports old style
* Apple GMUX with an IO-resource.
*/
return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
}
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@ -610,6 +590,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
{
.callback = video_detect_force_native,
/* Asus U46E */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "U46E"),
},
},
{
.callback = video_detect_force_native,
/* Asus UX303UB */
@ -618,6 +606,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
},
},
{
.callback = video_detect_force_native,
/* HP EliteBook 8460p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8460p"),
},
},
{
.callback = video_detect_force_native,
/* HP Pavilion g6-1d80nr / B4U19UA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
DMI_MATCH(DMI_PRODUCT_SKU, "B4U19UA"),
},
},
{
.callback = video_detect_force_native,
/* Samsung N150P */
@ -766,6 +771,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;
static bool apple_gmux_present;
static bool native_available;
static bool init_done;
static long video_caps;
@ -779,6 +785,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
apple_gmux_present = apple_gmux_detect(NULL, NULL);
init_done = true;
}
if (native)
@ -800,7 +807,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
if (apple_gmux_backlight_present())
if (apple_gmux_present)
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */

View File

@ -3109,7 +3109,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
else
else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */

View File

@ -137,7 +137,7 @@ struct ublk_device {
char *__queues;
unsigned short queue_size;
unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@ -2092,13 +2092,12 @@ static void __exit ublk_exit(void)
struct ublk_device *ub;
int id;
class_destroy(ublk_chr_class);
misc_deregister(&ublk_misc);
idr_for_each_entry(&ublk_index_idr, ub, id)
ublk_remove(ub);
class_destroy(ublk_chr_class);
misc_deregister(&ublk_misc);
idr_destroy(&ublk_index_idr);
unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}

View File

@ -857,7 +857,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
return platform_driver_register(&sunxi_rsb_driver);
ret = platform_driver_register(&sunxi_rsb_driver);
if (ret) {
bus_unregister(&sunxi_rsb_bus);
return ret;
}
return 0;
}
module_init(sunxi_rsb_init);
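The fix above follows the usual init-time unwind rule: when a later registration fails, undo the earlier ones in reverse order. A generic hedged sketch with placeholder bus and driver names:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct bus_type my_bus_type = { .name = "my_bus" };
static struct platform_driver my_platform_driver = {
	.driver = { .name = "my_driver" },
};

static int __init my_module_init(void)
{
	int ret;

	ret = bus_register(&my_bus_type);
	if (ret)
		return ret;

	ret = platform_driver_register(&my_platform_driver);
	if (ret)
		bus_unregister(&my_bus_type);	/* unwind the step that succeeded */

	return ret;
}
module_init(my_module_init);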

View File

@ -736,4 +736,3 @@ module_exit(cxl_acpi_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);
MODULE_SOFTDEP("pre: cxl_pmem");

View File

@ -227,34 +227,16 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
return cxl_nvd;
}
static void cxl_nvd_unregister(void *_cxl_nvd)
{
struct cxl_nvdimm *cxl_nvd = _cxl_nvd;
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
/*
* Either the bridge is in ->remove() context under the device_lock(),
* or cxlmd_release_nvdimm() is cancelling the bridge's release action
* for @cxl_nvd and doing it itself (while manually holding the bridge
* lock).
*/
device_lock_assert(&cxl_nvb->dev);
cxl_nvd->cxlmd = NULL;
cxlmd->cxl_nvd = NULL;
device_unregister(&cxl_nvd->dev);
}
static void cxlmd_release_nvdimm(void *_cxlmd)
{
struct cxl_memdev *cxlmd = _cxlmd;
struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
device_lock(&cxl_nvb->dev);
if (cxlmd->cxl_nvd)
devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister,
cxlmd->cxl_nvd);
device_unlock(&cxl_nvb->dev);
cxl_nvd->cxlmd = NULL;
cxlmd->cxl_nvd = NULL;
cxlmd->cxl_nvb = NULL;
device_unregister(&cxl_nvd->dev);
put_device(&cxl_nvb->dev);
}
@ -293,22 +275,6 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));
/*
* The two actions below arrange for @cxl_nvd to be deleted when either
* the top-level PMEM bridge goes down, or the endpoint device goes
* through ->remove().
*/
device_lock(&cxl_nvb->dev);
if (cxl_nvb->dev.driver)
rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister,
cxl_nvd);
else
rc = -ENXIO;
device_unlock(&cxl_nvb->dev);
if (rc)
goto err_alloc;
/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);

View File

@ -554,8 +554,11 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
/* If multiple errors, log header points to first error from ctrl reg */
if (hweight32(status) > 1) {
addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, readl(addr)));
void __iomem *rcc_addr =
cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
readl(rcc_addr)));
} else {
fe = status;
}
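A stand-alone sketch (not part of this diff) of the BIT(FIELD_GET(...)) idiom used above, which turns a "first error" index field into a single-bit mask; the register layout below is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define FE_MASK			0x3f00u	/* hypothetical index field, bits [13:8] */
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))
#define BIT(n)			(1u << (n))

int main(void)
{
	uint32_t ctrl = 0x0300;	/* index field holds 3 */

	/* FIELD_GET() extracts 3, BIT() turns it into the mask 0x8 */
	printf("0x%x\n", BIT(FIELD_GET(FE_MASK, ctrl)));
	return 0;
}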

View File

@ -225,11 +225,35 @@ static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}
static int detach_nvdimm(struct device *dev, void *data)
{
struct cxl_nvdimm *cxl_nvd;
bool release = false;
if (!is_cxl_nvdimm(dev))
return 0;
device_lock(dev);
if (!dev->driver)
goto out;
cxl_nvd = to_cxl_nvdimm(dev);
if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
release = true;
out:
device_unlock(dev);
if (release)
device_release_driver(dev);
return 0;
}
static void unregister_nvdimm_bus(void *_cxl_nvb)
{
struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus;
bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, detach_nvdimm);
cxl_nvb->nvdimm_bus = NULL;
nvdimm_bus_unregister(nvdimm_bus);
}

View File

@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&dma_fence_stub.flags);
&fence->flags);
dma_fence_signal(fence);

View File

@ -34,6 +34,9 @@
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
/* Default workqueue processing interval on this instance, in msecs */
#define DEFAULT_POLL_INTERVAL 1000
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
* whole one second to save timers firing all over the period
* between integral seconds
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
* timers firing on sub-second basis, while they are happy
* to fire together on the 1 second exactly
*/
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_queue_work(&edac_dev->work, edac_dev->delay);
@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */
if (edac_dev->poll_msec == 1000)
if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
else
edac_mod_work(&edac_dev->work, edac_dev->delay);
@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
/* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
/*
* enable workq processing on this instance,
* default = 1000 msec
*/
edac_device_workq_setup(edac_dev, 1000);
edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
} else {
edac_dev->op_state = OP_RUNNING_INTERRUPT;
}
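A brief note (not part of this diff) on the "?:" form in the new edac_device_workq_setup() call above: it is GNU C's conditional with an omitted middle operand, so "a ?: b" yields a when a is non-zero and b otherwise, evaluating a only once. A tiny stand-alone illustration:

#include <stdio.h>

static unsigned int poll_or_default(unsigned int poll_msec)
{
	return poll_msec ?: 1000;	/* 1000 stands in for DEFAULT_POLL_INTERVAL */
}

int main(void)
{
	printf("%u %u\n", poll_or_default(0), poll_or_default(250));	/* prints: 1000 250 */
	return 0;
}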

View File

@ -252,7 +252,7 @@ clear:
static int
dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
{
struct llcc_drv_data *drv = edev_ctl->pvt_info;
struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
int ret;
ret = dump_syn_reg_values(drv, bank, err_type);
@ -289,7 +289,7 @@ static irqreturn_t
llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
int ret;
@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->dev_name = dev_name(dev);
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
edev_ctl->pvt_info = llcc_driv_data;
rc = edac_device_add_device(edev_ctl);
if (rc)

View File

@ -819,8 +819,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
if (is_fcp_request(r->request))
if (is_fcp_request(r->request)) {
kfree(r->data);
goto out;
}
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;

Some files were not shown because too many files have changed in this diff.