Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

net/dsa/dsa2.c
  commit afb3cc1a39 ("net: dsa: unlock the rtnl_mutex when dsa_master_setup() fails")
  commit e83d565378 ("net: dsa: replay master state events in dsa_tree_{setup,teardown}_master")
  https://lore.kernel.org/all/20220307101436.7ae87da0@canb.auug.org.au/

drivers/net/ethernet/intel/ice/ice.h
  commit 97b0129146 ("ice: Fix error with handling of bonding MTU")
  commit 43113ff734 ("ice: add TTY for GNSS module for E810T device")
  https://lore.kernel.org/all/20220310112843.3233bcf1@canb.auug.org.au/

drivers/staging/gdm724x/gdm_lte.c
  commit fc7f750dc9 ("staging: gdm724x: fix use after free in gdm_lte_rx()")
  commit 4bcc4249b4 ("staging: Use netif_rx().")
  https://lore.kernel.org/all/20220308111043.1018a59d@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in commit 1e8a3f0d2a.

.mailmap | 3
@@ -187,6 +187,8 @@ Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.cz>
 Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz>
+Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
+Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
 Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
@@ -216,6 +218,7 @@ Koushik <raghavendra.koushik@neterion.com>
 Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
+Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
 Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
@@ -60,8 +60,8 @@ privileged data touched during the speculative execution.
 Spectre variant 1 attacks take advantage of speculative execution of
 conditional branches, while Spectre variant 2 attacks use speculative
 execution of indirect branches to leak privileged memory.
-See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
-:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
+See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[6] <spec_ref6>`
+:ref:`[7] <spec_ref7>` :ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.

 Spectre variant 1 (Bounds Check Bypass)
 ---------------------------------------
@@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the
 speculative execution's side effects left in level 1 cache to infer the
 victim's data.

+Yet another variant 2 attack vector is for the attacker to poison the
+Branch History Buffer (BHB) to speculatively steer an indirect branch
+to a specific Branch Target Buffer (BTB) entry, even if the entry isn't
+associated with the source address of the indirect branch. Specifically,
+the BHB might be shared across privilege levels even in the presence of
+Enhanced IBRS.
+
+Currently the only known real-world BHB attack vector is via
+unprivileged eBPF. Therefore, it's highly recommended to not enable
+unprivileged eBPF, especially when eIBRS is used (without retpolines).
+For a full mitigation against BHB attacks, it's recommended to use
+retpolines (or eIBRS combined with retpolines).
+
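The status the document describes can be read back directly at run time: the
mitigation state lives in sysfs, and the unprivileged eBPF switch in procfs.
A minimal userspace sketch (illustrative only; the two file paths are the
standard kernel interfaces, the program itself is not part of this merge):

/* spectre_check.c - print Spectre v2 status and unprivileged eBPF state. */
#include <stdio.h>

static void print_file(const char *label, const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%s: <unavailable>\n", label);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", label, buf);
	fclose(f);
}

int main(void)
{
	print_file("spectre_v2",
		   "/sys/devices/system/cpu/vulnerabilities/spectre_v2");
	/* 0 means unprivileged eBPF is enabled, i.e. the only known
	 * real-world BHB attack vector is open. */
	print_file("unprivileged_bpf_disabled",
		   "/proc/sys/kernel/unprivileged_bpf_disabled");
	return 0;
}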
 Attack scenarios
 ----------------

@@ -364,13 +377,15 @@ The possible values in this file are:

 - Kernel status:

-  ==================================== =================================
-  'Not affected'                       The processor is not vulnerable
-  'Vulnerable'                         Vulnerable, no mitigation
-  'Mitigation: Full generic retpoline' Software-focused mitigation
-  'Mitigation: Full AMD retpoline'     AMD-specific software mitigation
-  'Mitigation: Enhanced IBRS'          Hardware-focused mitigation
-  ==================================== =================================
+  ======================================== =================================
+  'Not affected'                           The processor is not vulnerable
+  'Mitigation: None'                       Vulnerable, no mitigation
+  'Mitigation: Retpolines'                 Use Retpoline thunks
+  'Mitigation: LFENCE'                     Use LFENCE instructions
+  'Mitigation: Enhanced IBRS'              Hardware-focused mitigation
+  'Mitigation: Enhanced IBRS + Retpolines' Hardware-focused + Retpolines
+  'Mitigation: Enhanced IBRS + LFENCE'     Hardware-focused + LFENCE
+  ======================================== =================================

 - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
   used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
@@ -583,12 +598,13 @@ kernel command line.

         Specific mitigations can also be selected manually:

-                retpoline
-                        replace indirect branches
-                retpoline,generic
-                        google's original retpoline
-                retpoline,amd
-                        AMD-specific minimal thunk
+                retpoline               auto pick between generic,lfence
+                retpoline,generic       Retpolines
+                retpoline,lfence        LFENCE; indirect branch
+                retpoline,amd           alias for retpoline,lfence
+                eibrs                   enhanced IBRS
+                eibrs,retpoline         enhanced IBRS + Retpolines
+                eibrs,lfence            enhanced IBRS + LFENCE

         Not specifying this option is equivalent to
         spectre_v2=auto.
@@ -599,7 +615,7 @@ kernel command line.
         spectre_v2=off. Spectre variant 1 mitigations
         cannot be disabled.

-        For spectre_v2_user see :doc:`/admin-guide/kernel-parameters`.
+        For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt

 Mitigation selection guide
 --------------------------
@@ -681,7 +697,7 @@ AMD white papers:

 .. _spec_ref6:

-[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
+[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/Managing-Speculation-on-AMD-Processors.pdf>`_.

 ARM white papers:

@@ -5361,8 +5361,12 @@
 			Specific mitigations can also be selected manually:

 			retpoline	  - replace indirect branches
-			retpoline,generic - google's original retpoline
-			retpoline,amd	  - AMD-specific minimal thunk
+			retpoline,generic - Retpolines
+			retpoline,lfence  - LFENCE; indirect branch
+			retpoline,amd	  - alias for retpoline,lfence
+			eibrs		  - enhanced IBRS
+			eibrs,retpoline	  - enhanced IBRS + Retpolines
+			eibrs,lfence	  - enhanced IBRS + LFENCE

 			Not specifying this option is equivalent to
 			spectre_v2=auto.
@@ -23,7 +23,7 @@ There are four components to pagemap:
     * Bit  56    page exclusively mapped (since 4.2)
     * Bit  57    pte is uffd-wp write-protected (since 5.13) (see
       :ref:`Documentation/admin-guide/mm/userfaultfd.rst <userfaultfd>`)
-    * Bits 57-60 zero
+    * Bits 58-60 zero
     * Bit  61    page is file-page or shared-anon (since 3.5)
     * Bit  62    page swapped
     * Bit  63    page present
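Each entry in /proc/self/pagemap is one 64-bit value per virtual page, so the
bit layout being corrected above can be exercised with plain file I/O. A
minimal sketch (illustrative only, not part of this merge; error handling
trimmed):

/* pagemap_bits.c - decode the flag bits documented above for one page. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = malloc(page);
	uint64_t entry = 0;
	int fd;

	if (!buf)
		return 1;
	buf[0] = 1;	/* fault the page in so "present" is meaningful */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;
	/* one 64-bit entry per virtual page */
	pread(fd, &entry, sizeof(entry), (uintptr_t)buf / page * 8);
	close(fd);

	printf("present:     %llu\n", (unsigned long long)(entry >> 63) & 1);
	printf("swapped:     %llu\n", (unsigned long long)(entry >> 62) & 1);
	printf("file/shared: %llu\n", (unsigned long long)(entry >> 61) & 1);
	printf("uffd-wp:     %llu\n", (unsigned long long)(entry >> 57) & 1);
	printf("exclusive:   %llu\n", (unsigned long long)(entry >> 56) & 1);
	return 0;
}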
@@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
 subsystem that the buffer is fully accessible at the elevated privilege
 level (and ideally inaccessible or at least read-only at the
 lesser-privileged levels).
-
-DMA_ATTR_OVERWRITE
-------------------
-
-This is a hint to the DMA-mapping subsystem that the device is expected to
-overwrite the entire mapped size, thus the caller does not require any of the
-previous buffer contents to be preserved. This allows bounce-buffering
-implementations to optimise DMA_FROM_DEVICE transfers.
@@ -48,7 +48,6 @@ description: |
         sdx65
         sm7225
         sm8150
-        sdx65
         sm8250
         sm8350
         sm8450
@@ -228,11 +227,6 @@ properties:
           - qcom,sdx65-mtp
       - const: qcom,sdx65

-  - items:
-      - enum:
-          - qcom,sdx65-mtp
-      - const: qcom,sdx65
-
   - items:
       - enum:
           - qcom,ipq6018-cp01
@@ -91,22 +91,7 @@ properties:
       $ref: /schemas/graph.yaml#/$defs/port-base
       unevaluatedProperties: false
       description:
-        MIPI DSI/DPI input.
-
-      properties:
-        endpoint:
-          $ref: /schemas/media/video-interfaces.yaml#
-          type: object
-          additionalProperties: false
-
-          properties:
-            remote-endpoint: true
-
-            bus-type:
-              enum: [1, 5]
-              default: 1
-
-            data-lanes: true
+        Video port for MIPI DSI input.

     port@1:
       $ref: /schemas/graph.yaml#/properties/port
@@ -155,8 +140,6 @@ examples:
             reg = <0>;
             anx7625_in: endpoint {
                 remote-endpoint = <&mipi_dsi>;
-                bus-type = <5>;
-                data-lanes = <0 1 2 3>;
             };
         };

@@ -39,7 +39,7 @@ patternProperties:
   '^phy@[a-f0-9]+$':
     $ref: ../phy/bcm-ns-usb2-phy.yaml

-  '^pin-controller@[a-f0-9]+$':
+  '^pinctrl@[a-f0-9]+$':
     $ref: ../pinctrl/brcm,ns-pinmux.yaml

   '^syscon@[a-f0-9]+$':
@@ -94,7 +94,7 @@ examples:
       reg = <0x180 0x4>;
     };

-    pin-controller@1c0 {
+    pinctrl@1c0 {
       compatible = "brcm,bcm4708-pinmux";
       reg = <0x1c0 0x24>;
       reg-names = "cru_gpio_control";
@@ -126,7 +126,7 @@ properties:
   clock-frequency:
     const: 12288000

-  lochnagar-pinctrl:
+  pinctrl:
     type: object
     $ref: /schemas/pinctrl/cirrus,lochnagar.yaml#

@@ -255,7 +255,7 @@ required:
   - reg
   - reset-gpios
   - lochnagar-clk
-  - lochnagar-pinctrl
+  - pinctrl

 additionalProperties: false

@@ -293,7 +293,7 @@ examples:
       clock-frequency = <32768>;
     };

-    lochnagar-pinctrl {
+    pinctrl {
       compatible = "cirrus,lochnagar-pinctrl";

       gpio-controller;
@@ -37,6 +37,12 @@ properties:
       max bit rate supported in bps
     minimum: 1

+  mux-states:
+    description:
+      mux controller node to route the signals from controller to
+      transceiver.
+    maxItems: 1
+
 required:
   - compatible
   - '#phy-cells'
@@ -53,4 +59,5 @@ examples:
       max-bitrate = <5000000>;
       standby-gpios = <&wakeup_gpio1 16 GPIO_ACTIVE_LOW>;
       enable-gpios = <&main_gpio1 67 GPIO_ACTIVE_HIGH>;
+      mux-states = <&mux0 1>;
     };
@@ -107,9 +107,6 @@ properties:

 additionalProperties: false

-allOf:
-  - $ref: "pinctrl.yaml#"
-
 required:
   - pinctrl-0
   - pinctrl-names
MAINTAINERS | 37
@@ -2572,7 +2572,7 @@ F: sound/soc/rockchip/
 N:	rockchip

 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 R:	Alim Akhtar <alim.akhtar@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org
@@ -2739,7 +2739,7 @@ N: stm32
 N:	stm

 ARM/Synaptics SoC support
-M:	Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+M:	Jisheng Zhang <jszhang@kernel.org>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -3907,7 +3907,7 @@ M: Scott Branden <sbranden@broadcom.com>
 M:	bcm-kernel-feedback-list@broadcom.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-T:	git git://github.com/broadcom/cygnus-linux.git
+T:	git git://github.com/broadcom/stblinux.git
 F:	arch/arm64/boot/dts/broadcom/northstar2/*
 F:	arch/arm64/boot/dts/broadcom/stingray/*
 F:	drivers/clk/bcm/clk-ns*
@@ -11683,7 +11683,7 @@ F: drivers/iio/proximity/mb1232.c

 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:	Iskren Chernev <iskren.chernev@gmail.com>
-R:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R:	Krzysztof Kozlowski <krzk@kernel.org>
 R:	Marek Szyprowski <m.szyprowski@samsung.com>
 R:	Matheus Castello <matheus@castello.eng.br>
 L:	linux-pm@vger.kernel.org
@@ -11693,7 +11693,7 @@ F: drivers/power/supply/max17040_battery.c

 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:	Hans de Goede <hdegoede@redhat.com>
-R:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R:	Krzysztof Kozlowski <krzk@kernel.org>
 R:	Marek Szyprowski <m.szyprowski@samsung.com>
 R:	Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:	Purism Kernel Team <kernel@puri.sm>
@@ -11738,7 +11738,7 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:	drivers/power/supply/max77976_charger.c

 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-pm@vger.kernel.org
 S:	Supported
@@ -11747,7 +11747,7 @@ F: drivers/power/supply/max77693_charger.c

 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
@@ -12436,7 +12436,7 @@ F: include/linux/memblock.h
 F:	mm/memblock.c

 MEMORY CONTROLLER DRIVERS
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@ -13573,7 +13573,7 @@ F: include/uapi/linux/nexthop.h
 F:	net/ipv4/nexthop.c

 NFC SUBSYSTEM
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 L:	linux-nfc@lists.01.org (subscribers-only)
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -13887,7 +13887,7 @@ F: Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:	drivers/regulator/pf8x00-regulator.c

 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -15313,7 +15313,7 @@ F: drivers/pinctrl/renesas/

 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:	Alim Akhtar <alim.akhtar@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -16954,7 +16954,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/scsi/zfcp_*

 S3C ADC BATTERY DRIVER
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 L:	linux-samsung-soc@vger.kernel.org
 S:	Odd Fixes
 F:	drivers/power/supply/s3c_adc_battery.c
@@ -16999,7 +16999,7 @@ F: Documentation/admin-guide/LSM/SafeSetID.rst
 F:	security/safesetid/

 SAMSUNG AUDIO (ASoC) DRIVERS
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
@@ -17007,7 +17007,7 @@ F: Documentation/devicetree/bindings/sound/samsung*
 F:	sound/soc/samsung/

 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
@@ -17042,7 +17042,7 @@ S: Maintained
 F:	drivers/platform/x86/samsung-laptop.c

 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-kernel@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
@@ -17068,7 +17068,7 @@ F: drivers/media/platform/s3c-camif/
 F:	include/media/drv-intf/s3c_camif.h

 SAMSUNG S3FWRN5 NFC DRIVER
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Krzysztof Opasiak <k.opasiak@samsung.com>
 L:	linux-nfc@lists.01.org (subscribers-only)
 S:	Maintained
@@ -17090,7 +17090,7 @@ S: Supported
 F:	drivers/media/i2c/s5k5baf.c

 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Vladimir Zapolskiy <vz@mleia.com>
 L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
@@ -17125,7 +17125,7 @@ F: include/linux/clk/samsung.h
 F:	include/linux/platform_data/clk-s3c2410.h

 SAMSUNG SPI DRIVERS
-M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Andi Shyti <andi@etezian.org>
 L:	linux-spi@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
@@ -21475,7 +21475,6 @@ THE REST
 M:	Linus Torvalds <torvalds@linux-foundation.org>
 L:	linux-kernel@vger.kernel.org
 S:	Buried alive in reporters
-Q:	http://patchwork.kernel.org/project/LKML/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 F:	*
 F:	*/
Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Superb Owl

 # *DOCUMENTATION*
@@ -118,7 +118,7 @@
 	};

 	pinctrl_fwqspid_default: fwqspid_default {
-		function = "FWQSPID";
+		function = "FWSPID";
 		groups = "FWQSPID";
 	};

@@ -290,6 +290,7 @@

 	hvs: hvs@7e400000 {
 		compatible = "brcm,bcm2711-hvs";
 		reg = <0x7e400000 0x8000>;
+		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 	};

@@ -5,7 +5,13 @@

 / {
 	/* Version of Nyan Big with 1080p panel */
-	panel {
-		compatible = "auo,b133htn01";
+	host1x@50000000 {
+		dpaux@545c0000 {
+			aux-bus {
+				panel: panel {
+					compatible = "auo,b133htn01";
+				};
+			};
+		};
 	};
 };
@@ -107,6 +107,16 @@
 	.endm
 #endif

+#if __LINUX_ARM_ARCH__ < 7
+	.macro	dsb, args
+	mcr	p15, 0, r0, c7, c10, 4
+	.endm
+
+	.macro	isb, args
+	mcr	p15, 0, r0, c7, c5, 4
+	.endm
+#endif
+
 	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	.if \save
arch/arm/include/asm/spectre.h (new file, 32 lines)

@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+enum {
+	SPECTRE_UNAFFECTED,
+	SPECTRE_MITIGATED,
+	SPECTRE_VULNERABLE,
+};
+
+enum {
+	__SPECTRE_V2_METHOD_BPIALL,
+	__SPECTRE_V2_METHOD_ICIALLU,
+	__SPECTRE_V2_METHOD_SMC,
+	__SPECTRE_V2_METHOD_HVC,
+	__SPECTRE_V2_METHOD_LOOP8,
+};
+
+enum {
+	SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
+	SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
+	SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
+	SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+	SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
+};
+
+void spectre_v2_update_state(unsigned int state, unsigned int methods);
+
+int spectre_bhb_update_vectors(unsigned int method);
+
+#endif
@@ -26,6 +26,19 @@
 #define ARM_MMU_DISCARD(x)	x
 #endif

+/*
+ * ld.lld does not support NOCROSSREFS:
+ * https://github.com/ClangBuiltLinux/linux/issues/1609
+ */
+#ifdef CONFIG_LD_IS_LLD
+#define NOCROSSREFS
+#endif
+
+/* Set start/end symbol names to the LMA for the section */
+#define ARM_LMA(sym, section)						\
+	sym##_start = LOADADDR(section);				\
+	sym##_end = LOADADDR(section) + SIZEOF(section)
+
 #define PROC_INFO							\
 		. = ALIGN(4);						\
 		__proc_info_begin = .;					\
@@ -110,19 +123,31 @@
  * only thing that matters is their relative offsets
  */
 #define ARM_VECTORS							\
-	__vectors_start = .;						\
-	.vectors 0xffff0000 : AT(__vectors_start) {			\
-		*(.vectors)						\
+	__vectors_lma = .;						\
+	OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {		\
+		.vectors {						\
+			*(.vectors)					\
+		}							\
+		.vectors.bhb.loop8 {					\
+			*(.vectors.bhb.loop8)				\
+		}							\
+		.vectors.bhb.bpiall {					\
+			*(.vectors.bhb.bpiall)				\
+		}							\
 	}								\
-	. = __vectors_start + SIZEOF(.vectors);				\
-	__vectors_end = .;						\
+	ARM_LMA(__vectors, .vectors);					\
+	ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);		\
+	ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);		\
+	. = __vectors_lma + SIZEOF(.vectors) +				\
+		SIZEOF(.vectors.bhb.loop8) +				\
+		SIZEOF(.vectors.bhb.bpiall);				\
 									\
-	__stubs_start = .;						\
-	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {		\
+	__stubs_lma = .;						\
+	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {		\
 		*(.stubs)						\
 	}								\
-	. = __stubs_start + SIZEOF(.stubs);				\
-	__stubs_end = .;						\
+	ARM_LMA(__stubs, .stubs);					\
+	. = __stubs_lma + SIZEOF(.stubs);				\
 									\
 	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

|
@ -106,4 +106,6 @@ endif
|
||||
|
||||
obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o
|
||||
|
||||
obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
|
||||
|
||||
extra-y := $(head-y) vmlinux.lds
|
||||
|
@@ -1002,12 +1002,11 @@ vector_\name:
 	sub	lr, lr, #\correction
 	.endif

-	@
-	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-	@ (parent CPSR)
-	@
+	@ Save r0, lr_<exception> (parent PC)
 	stmia	sp, {r0, lr}		@ save r0, lr
-	mrs	lr, spsr
+
+	@ Save spsr_<exception> (parent CPSR)
+2:	mrs	lr, spsr
 	str	lr, [sp, #8]		@ save spsr

 	@
@@ -1028,6 +1027,44 @@ vector_\name:
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)

+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.subsection 1
+	.align 5
+vector_bhb_loop8_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mov	r0, #8
+1:	b	. + 4
+	subs	r0, r0, #1
+	bne	1b
+	dsb
+	isb
+	b	2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+	b	2b
+ENDPROC(vector_bhb_bpiall_\name)
+	.previous
+#endif
+
 	.align	2
 	@ handler addresses follow this label
 1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
 	.section .stubs, "ax", %progbits
 	@ This must be the first word
 	.word	vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.word	vector_bhb_loop8_swi
+	.word	vector_bhb_bpiall_swi
+#endif

 vector_rst:
  ARM(	swi	SYS_ERROR0	)
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
  */
+	.subsection 2
 	vector_stub	fiq, FIQ_MODE, 4

 	.long	__fiq_usr			@  0  (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
 	W(b)	vector_irq
 	W(b)	vector_fiq

+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_loop8_und
+	W(ldr)	pc, .L__vectors_bhb_loop8_start + 0x1004
+	W(b)	vector_bhb_loop8_pabt
+	W(b)	vector_bhb_loop8_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_loop8_irq
+	W(b)	vector_bhb_loop8_fiq
+
+	.section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_bpiall_und
+	W(ldr)	pc, .L__vectors_bhb_bpiall_start + 0x1008
+	W(b)	vector_bhb_bpiall_pabt
+	W(b)	vector_bhb_bpiall_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_bpiall_irq
+	W(b)	vector_bhb_bpiall_fiq
+#endif
+
 	.data
 	.align	2

@@ -153,6 +153,29 @@ ENDPROC(ret_from_fork)
  *-----------------------------------------------------------------------------
  */

 	.align	5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mov	r8, #8
+1:	b	2f
+2:	subs	r8, r8, #1
+	bne	1b
+	dsb
+	isb
+	b	3f
+ENDPROC(vector_bhb_loop8_swi)
+
+	.align	5
+ENTRY(vector_bhb_bpiall_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
+	isb
+	b	3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
+	.align	5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
@@ -160,6 +183,7 @@ ENTRY(vector_swi)
 #else
 	sub	sp, sp, #PT_REGS_SIZE
 	stmia	sp, {r0 - r12}		@ Calling r0 - r12
+3:
  ARM(	add	r8, sp, #S_PC		)
  ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
  THUMB(	mov	r8, sp			)
arch/arm/kernel/spectre.c (new file, 71 lines)

@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	return !sysctl_unprivileged_bpf_disabled;
+#else
+	return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+	if (state > spectre_v2_state)
+		spectre_v2_state = state;
+	spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	const char *method;
+
+	if (spectre_v2_state == SPECTRE_UNAFFECTED)
+		return sprintf(buf, "%s\n", "Not affected");
+
+	if (spectre_v2_state != SPECTRE_MITIGATED)
+		return sprintf(buf, "%s\n", "Vulnerable");
+
+	if (_unprivileged_ebpf_enabled())
+		return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+	switch (spectre_v2_methods) {
+	case SPECTRE_V2_METHOD_BPIALL:
+		method = "Branch predictor hardening";
+		break;
+
+	case SPECTRE_V2_METHOD_ICIALLU:
+		method = "I-cache invalidation";
+		break;
+
+	case SPECTRE_V2_METHOD_SMC:
+	case SPECTRE_V2_METHOD_HVC:
+		method = "Firmware call";
+		break;
+
+	case SPECTRE_V2_METHOD_LOOP8:
+		method = "History overwrite";
+		break;
+
+	default:
+		method = "Multiple mitigations";
+		break;
+	}
+
+	return sprintf(buf, "Mitigation: %s\n", method);
+}
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -789,10 +790,59 @@ static inline void __init kuser_init(void *vectors)
 }
 #endif

+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+	memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+	unsigned long start = (unsigned long)vma + offset;
+	unsigned long end = start + size;
+
+	flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+	void *vec_start, *vec_end;
+
+	if (system_state >= SYSTEM_FREEING_INITMEM) {
+		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+		       smp_processor_id());
+		return SPECTRE_VULNERABLE;
+	}
+
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		vec_start = __vectors_bhb_loop8_start;
+		vec_end = __vectors_bhb_loop8_end;
+		break;
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		vec_start = __vectors_bhb_bpiall_start;
+		vec_end = __vectors_bhb_bpiall_end;
+		break;
+
+	default:
+		pr_err("CPU%u: unknown Spectre BHB state %d\n",
+		       smp_processor_id(), method);
+		return SPECTRE_VULNERABLE;
+	}
+
+	copy_from_lma(vectors_page, vec_start, vec_end);
+	flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+	return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
-#ifndef CONFIG_CPU_V7M
 	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
 	unsigned i;
@@ -813,17 +863,20 @@ void __init early_trap_init(void *vectors_base)
 	 * into the vector page, mapped at 0xffff0000, and ensure these
 	 * are visible to the instruction stream.
 	 */
-	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-	memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);

 	kuser_init(vectors_base);

-	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
 }
+#else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
 	 * memory area. The address is configurable and so a table in the kernel
 	 * image can be used.
 	 */
-#endif
 }
+#endif
@@ -3,6 +3,7 @@ menuconfig ARCH_MSTARV7
 	depends on ARCH_MULTI_V7
 	select ARM_GIC
 	select ARM_HEAVY_MB
+	select HAVE_ARM_ARCH_TIMER
 	select MST_IRQ
 	select MSTAR_MSC313_MPLL
 	help
@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE

 config CPU_SPECTRE
 	bool
+	select GENERIC_CPU_VULNERABILITIES

 config HARDEN_BRANCH_PREDICTOR
 	bool "Harden the branch predictor against aliasing attacks" if EXPERT
@@ -850,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR

 	   If unsure, say Y.

+config HARDEN_BRANCH_HISTORY
+	bool "Harden Spectre style attacks against branch history" if EXPERT
+	depends on CPU_SPECTRE
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation. When
+	  taking an exception, a sequence of branches overwrites the branch
+	  history, or branch history is invalidated.
+
 config TLS_REG_EMUL
 	bool
 	select NEED_KUSER_HELPERS
@@ -6,8 +6,35 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/proc-fns.h>
+#include <asm/spectre.h>
 #include <asm/system_misc.h>

+#ifdef CONFIG_ARM_PSCI
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+	switch ((int)res.a0) {
+	case SMCCC_RET_SUCCESS:
+		return SPECTRE_MITIGATED;
+
+	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+		return SPECTRE_UNAFFECTED;
+
+	default:
+		return SPECTRE_VULNERABLE;
+	}
+}
+#else
+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+	return SPECTRE_VULNERABLE;
+}
+#endif
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

@@ -36,13 +63,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }

-static void cpu_v7_spectre_init(void)
+static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
 	const char *spectre_v2_method = NULL;
 	int cpu = smp_processor_id();

 	if (per_cpu(harden_branch_predictor_fn, cpu))
-		return;
+		return SPECTRE_MITIGATED;
+
+	switch (method) {
+	case SPECTRE_V2_METHOD_BPIALL:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_bpiall;
+		spectre_v2_method = "BPIALL";
+		break;
+
+	case SPECTRE_V2_METHOD_ICIALLU:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_iciallu;
+		spectre_v2_method = "ICIALLU";
+		break;
+
+	case SPECTRE_V2_METHOD_HVC:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			call_hvc_arch_workaround_1;
+		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+		spectre_v2_method = "hypervisor";
+		break;
+
+	case SPECTRE_V2_METHOD_SMC:
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			call_smc_arch_workaround_1;
+		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+		spectre_v2_method = "firmware";
+		break;
+	}
+
+	if (spectre_v2_method)
+		pr_info("CPU%u: Spectre v2: using %s workaround\n",
+			smp_processor_id(), spectre_v2_method);
+
+	return SPECTRE_MITIGATED;
+}
+#else
+static unsigned int spectre_v2_install_workaround(unsigned int method)
+{
+	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
+		smp_processor_id());
+
+	return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_v2_init(void)
+{
+	unsigned int state, method = 0;

 	switch (read_cpuid_part()) {
 	case ARM_CPU_PART_CORTEX_A8:
@@ -51,69 +126,133 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		per_cpu(harden_branch_predictor_fn, cpu) =
-			harden_branch_predictor_bpiall;
-		spectre_v2_method = "BPIALL";
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_BPIALL;
 		break;

 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		per_cpu(harden_branch_predictor_fn, cpu) =
-			harden_branch_predictor_iciallu;
-		spectre_v2_method = "ICIALLU";
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_ICIALLU;
 		break;

-#ifdef CONFIG_ARM_PSCI
 	case ARM_CPU_PART_BRAHMA_B53:
 		/* Requires no workaround */
+		state = SPECTRE_UNAFFECTED;
 		break;

 	default:
 		/* Other ARM CPUs require no workaround */
-		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
+			state = SPECTRE_UNAFFECTED;
 			break;
-		fallthrough;
-	/* Cortex A57/A72 require firmware workaround */
-	case ARM_CPU_PART_CORTEX_A57:
-	case ARM_CPU_PART_CORTEX_A72: {
-		struct arm_smccc_res res;
+		}

-		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-				     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if ((int)res.a0 != 0)
-			return;
+		fallthrough;

+	/* Cortex A57/A72 require firmware workaround */
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72:
+		state = spectre_v2_get_cpu_fw_mitigation_state();
+		if (state != SPECTRE_MITIGATED)
+			break;
+
 		switch (arm_smccc_1_1_get_conduit()) {
 		case SMCCC_CONDUIT_HVC:
-			per_cpu(harden_branch_predictor_fn, cpu) =
-				call_hvc_arch_workaround_1;
-			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
-			spectre_v2_method = "hypervisor";
+			method = SPECTRE_V2_METHOD_HVC;
 			break;

 		case SMCCC_CONDUIT_SMC:
-			per_cpu(harden_branch_predictor_fn, cpu) =
-				call_smc_arch_workaround_1;
-			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
-			spectre_v2_method = "firmware";
+			method = SPECTRE_V2_METHOD_SMC;
 			break;

 		default:
+			state = SPECTRE_VULNERABLE;
 			break;
 		}
 	}
-#endif
-	}

-	if (spectre_v2_method)
-		pr_info("CPU%u: Spectre v2: using %s workaround\n",
-			smp_processor_id(), spectre_v2_method);
+	if (state == SPECTRE_MITIGATED)
+		state = spectre_v2_install_workaround(method);
+
+	spectre_v2_update_state(state, method);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		return "loop";
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		return "BPIALL";
+
+	default:
+		return "unknown";
+	}
+}
+
+static int spectre_bhb_install_workaround(int method)
+{
+	if (spectre_bhb_method != method) {
+		if (spectre_bhb_method) {
+			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+			       smp_processor_id());
+
+			return SPECTRE_VULNERABLE;
+		}
+
+		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+			return SPECTRE_VULNERABLE;
+
+		spectre_bhb_method = method;
+	}
+
+	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+		smp_processor_id(), spectre_bhb_method_name(method));
+
+	return SPECTRE_MITIGATED;
 }
 #else
-static void cpu_v7_spectre_init(void)
+static int spectre_bhb_install_workaround(int method)
 {
+	return SPECTRE_VULNERABLE;
 }
 #endif

+static void cpu_v7_spectre_bhb_init(void)
+{
+	unsigned int state, method = 0;
+
+	switch (read_cpuid_part()) {
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_LOOP8;
+		break;
+
+	case ARM_CPU_PART_CORTEX_A73:
+	case ARM_CPU_PART_CORTEX_A75:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_BPIALL;
+		break;
+
+	default:
+		state = SPECTRE_UNAFFECTED;
+		break;
+	}
+
+	if (state == SPECTRE_MITIGATED)
+		state = spectre_bhb_install_workaround(method);
+
+	spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
 						  u32 mask, const char *msg)
 {
@@ -142,16 +281,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
 void cpu_v7_ca8_ibe(void)
 {
 	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
-		cpu_v7_spectre_init();
+		cpu_v7_spectre_v2_init();
 }

 void cpu_v7_ca15_ibe(void)
 {
 	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
-		cpu_v7_spectre_init();
+		cpu_v7_spectre_v2_init();
 }

 void cpu_v7_bugs_init(void)
 {
-	cpu_v7_spectre_init();
+	cpu_v7_spectre_v2_init();
+	cpu_v7_spectre_bhb_init();
 }
@@ -1252,9 +1252,6 @@ config HW_PERF_EVENTS
 	def_bool y
 	depends on ARM_PMU

-config ARCH_HAS_FILTER_PGPROT
-	def_bool y
-
 # Supported by clang >= 7.0
 config CC_HAVE_SHADOW_CALL_STACK
 	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
@@ -1383,6 +1380,15 @@ config UNMAP_KERNEL_AT_EL0

 	  If unsure, say Y.

+config MITIGATE_SPECTRE_BRANCH_HISTORY
+	bool "Mitigate Spectre style attacks against branch history" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation.
+	  When taking an exception from user-space, a sequence of branches
+	  or a firmware call overwrites the branch history.
+
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
@@ -18,6 +18,7 @@

 	aliases {
 		spi0 = &spi0;
+		ethernet0 = &eth0;
 		ethernet1 = &eth1;
 		mmc0 = &sdhci0;
 		mmc1 = &sdhci1;
@@ -138,7 +139,9 @@
	/*
	 * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
	 * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
-	 * 2 size cells and also expects that the second range starts at 16 MB offset. If these
+	 * 2 size cells and also expects that the second range starts at 16 MB offset. Also it
+	 * expects that first range uses same address for PCI (child) and CPU (parent) cells (so
+	 * no remapping) and that this address is the lowest from all specified ranges. If these
	 * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
	 * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
	 * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
@@ -147,6 +150,9 @@
	 * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
	 * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
	 * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
+	 * Bug related to requirement of same child and parent addresses for first range is fixed
+	 * in U-Boot version 2022.04 by following commit:
+	 * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17
	 */
	#address-cells = <3>;
	#size-cells = <2>;
@@ -499,7 +499,7 @@
	 * (totaling 127 MiB) for MEM.
	 */
	ranges = <0x82000000 0 0xe8000000   0 0xe8000000   0 0x07f00000   /* Port 0 MEM */
-		  0x81000000 0 0xefff0000   0 0xefff0000   0 0x00010000>; /* Port 0 IO */
+		  0x81000000 0 0x00000000   0 0xefff0000   0 0x00010000>; /* Port 0 IO */
	interrupt-map-mask = <0 0 0 7>;
	interrupt-map = <0 0 0 1 &pcie_intc 0>,
			<0 0 0 2 &pcie_intc 1>,
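For readers unfamiliar with the encoding being fixed above: the first cell of
each child address in a PCI "ranges" entry selects the address space. A small
decoding sketch of the two values used here (illustrative only; the field
layout follows the standard IEEE 1275 PCI bus binding, and the code is not
part of this merge):

/* pci_range_hi.c - decode the first (phys.hi) cell of a PCI ranges entry. */
#include <stdint.h>
#include <stdio.h>

static void decode_phys_hi(uint32_t hi)
{
	/* ss bits [25:24] select the address space */
	static const char *space[] = { "config", "I/O", "32-bit MEM", "64-bit MEM" };

	printf("0x%08x: %s%s\n", hi,
	       (hi & (1u << 30)) ? "prefetchable " : "",
	       space[(hi >> 24) & 0x3]);
}

int main(void)
{
	decode_phys_hi(0x82000000);	/* Port 0 MEM in the ranges above */
	decode_phys_hi(0x81000000);	/* Port 0 IO */
	return 0;
}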
@@ -1584,7 +1584,7 @@
		#iommu-cells = <1>;

		nvidia,memory-controller = <&mc>;
-		status = "okay";
+		status = "disabled";
	};

	smmu: iommu@12000000 {
@@ -807,3 +807,8 @@

	qcom,snoc-host-cap-8bit-quirk;
 };
+
+&crypto {
+	/* FIXME: qce_start triggers an SError */
+	status= "disable";
+};
@@ -35,6 +35,24 @@
		clock-frequency = <32000>;
		#clock-cells = <0>;
	};
+
+	ufs_phy_rx_symbol_0_clk: ufs-phy-rx-symbol-0 {
+		compatible = "fixed-clock";
+		clock-frequency = <1000>;
+		#clock-cells = <0>;
+	};
+
+	ufs_phy_rx_symbol_1_clk: ufs-phy-rx-symbol-1 {
+		compatible = "fixed-clock";
+		clock-frequency = <1000>;
+		#clock-cells = <0>;
+	};
+
+	ufs_phy_tx_symbol_0_clk: ufs-phy-tx-symbol-0 {
+		compatible = "fixed-clock";
+		clock-frequency = <1000>;
+		#clock-cells = <0>;
+	};
 };

 cpus {
@@ -603,9 +621,9 @@
			<0>,
			<0>,
			<0>,
-			<0>,
-			<0>,
-			<0>,
+			<&ufs_phy_rx_symbol_0_clk>,
+			<&ufs_phy_rx_symbol_1_clk>,
+			<&ufs_phy_tx_symbol_0_clk>,
			<0>,
			<0>;
 };
@@ -1923,8 +1941,8 @@
			<75000000 300000000>,
			<0 0>,
			<0 0>,
-			<75000000 300000000>,
-			<75000000 300000000>;
+			<0 0>,
+			<0 0>;
		status = "disabled";
	};

@@ -726,7 +726,7 @@
		compatible = "qcom,sm8450-smmu-500", "arm,mmu-500";
		reg = <0 0x15000000 0 0x100000>;
		#iommu-cells = <2>;
-		#global-interrupts = <2>;
+		#global-interrupts = <1>;
		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
@@ -813,6 +813,7 @@
			     <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
@@ -1072,9 +1073,10 @@
			<&gcc GCC_USB30_PRIM_MASTER_CLK>,
			<&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
			<&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
-			<&gcc GCC_USB30_PRIM_SLEEP_CLK>;
+			<&gcc GCC_USB30_PRIM_SLEEP_CLK>,
+			<&gcc GCC_USB3_0_CLKREF_EN>;
		clock-names = "cfg_noc", "core", "iface", "mock_utmi",
-			      "sleep";
+			      "sleep", "xo";

		assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
				  <&gcc GCC_USB30_PRIM_MASTER_CLK>;
@@ -108,6 +108,13 @@
	hint	#20
	.endm

+/*
+ * Clear Branch History instruction
+ */
+	.macro clearbhb
+	hint	#22
+	.endm
+
 /*
  * Speculation barrier
  */
@@ -850,4 +857,50 @@ alternative_endif

 #endif  /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

+	.macro __mitigate_spectre_bhb_loop	tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb	spectre_bhb_patch_loop_iter
+	mov	\tmp, #32		// Patched to correct the immediate
+alternative_cb_end
+.Lspectre_bhb_loop\@:
+	b	. + 4
+	subs	\tmp, \tmp, #1
+	b.ne	.Lspectre_bhb_loop\@
+	sb
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+	.endm
+
+	.macro mitigate_spectre_bhb_loop	tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb	spectre_bhb_patch_loop_mitigation_enable
+	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
+alternative_cb_end
+	__mitigate_spectre_bhb_loop	\tmp
+.L_spectre_bhb_loop_done\@:
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+	.endm
+
+	/* Save/restores x0-x3 to the stack */
+	.macro __mitigate_spectre_bhb_fw
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+	stp	x0, x1, [sp, #-16]!
+	stp	x2, x3, [sp, #-16]!
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+alternative_cb	smccc_patch_fw_mitigation_conduit
+	nop					// Patched to SMC/HVC #0
+alternative_cb_end
+	ldp	x2, x3, [sp], #16
+	ldp	x0, x1, [sp], #16
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+	.endm
+
+	.macro mitigate_spectre_bhb_clear_insn
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb	spectre_bhb_patch_clearbhb
+	/* Patched to NOP when not supported */
+	clearbhb
+	isb
+alternative_cb_end
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+	.endm
 #endif	/* __ASM_ASSEMBLER_H */
@@ -637,6 +637,35 @@ static inline bool cpu_supports_mixed_endian_el0(void)
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }

+
+static inline bool supports_csv2p3(int scope)
+{
+	u64 pfr0;
+	u8 csv2_val;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+	else
+		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
+							ID_AA64PFR0_CSV2_SHIFT);
+	return csv2_val == 3;
+}
+
+static inline bool supports_clearbhb(int scope)
+{
+	u64 isar2;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+	else
+		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
+
+	return cpuid_feature_extract_unsigned_field(isar2,
+						    ID_AA64ISAR2_CLEARBHB_SHIFT);
+}
+
 const struct cpumask *system_32bit_el0_cpumask(void);
 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
@@ -73,10 +73,14 @@
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
 #define ARM_CPU_PART_CORTEX_A77		0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1	0xD40
+#define ARM_CPU_PART_CORTEX_A78		0xD41
+#define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
 #define ARM_CPU_PART_CORTEX_A710	0xD47
+#define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+#define ARM_CPU_PART_CORTEX_A78C	0xD4B

 #define APM_CPU_PART_POTENZA		0x000

@@ -117,10 +121,14 @@
 #define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -62,9 +62,11 @@ enum fixed_addresses {
 #endif /* CONFIG_ACPI_APEI_GHES */

 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_TEXT3,
+	FIX_ENTRY_TRAMP_TEXT2,
+	FIX_ENTRY_TRAMP_TEXT1,
	FIX_ENTRY_TRAMP_DATA,
-	FIX_ENTRY_TRAMP_TEXT,
-#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	__end_of_permanent_fixed_addresses,

@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
	AARCH64_INSN_HINT_PSB  = 0x11 << 5,
	AARCH64_INSN_HINT_TSB  = 0x12 << 5,
	AARCH64_INSN_HINT_CSDB = 0x14 << 5,
+	AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,

	AARCH64_INSN_HINT_BTI   = 0x20 << 5,
	AARCH64_INSN_HINT_BTIC  = 0x22 << 5,
@@ -714,6 +714,11 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }

+static inline bool kvm_system_needs_idmapped_vectors(void)
+{
+	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
+}
+
 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

 static inline void kvm_arch_hardware_unsetup(void) {}
@@ -5,6 +5,7 @@
 #ifndef __ASM_MTE_KASAN_H
 #define __ASM_MTE_KASAN_H

+#include <asm/compiler.h>
 #include <asm/mte-def.h>

 #ifndef __ASSEMBLY__
@@ -92,7 +92,7 @@ extern bool arm64_use_ng_mappings;
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC	/* PAGE_EXECONLY if Enhanced PAN */
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -101,7 +101,7 @@ extern bool arm64_use_ng_mappings;
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC	/* PAGE_EXECONLY if Enhanced PAN */
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
@@ -1017,17 +1017,6 @@ static inline bool arch_wants_old_prefaulted_pte(void)
 }
 #define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte

-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
-{
-	if (cpus_have_const_cap(ARM64_HAS_EPAN))
-		return prot;
-
-	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
-		return prot;
-
-	return PAGE_READONLY_EXEC;
-}
-
 static inline bool pud_sect_supported(void)
 {
	return PAGE_SIZE == SZ_4K;
@@ -5,7 +5,7 @@
 #ifndef __ASM_RWONCE_H
 #define __ASM_RWONCE_H

-#ifdef CONFIG_LTO
+#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__)

 #include <linux/compiler_types.h>
 #include <asm/alternative-macros.h>
@@ -66,7 +66,7 @@
 })

 #endif	/* !BUILD_VDSO */
-#endif	/* CONFIG_LTO */
+#endif	/* CONFIG_LTO && !__ASSEMBLY__ */

 #include <asm-generic/rwonce.h>

@ -23,4 +23,9 @@ extern char __mmuoff_data_start[], __mmuoff_data_end[];
|
||||
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
|
||||
extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
|
||||
|
||||
static inline size_t entry_tramp_text_size(void)
|
||||
{
|
||||
return __entry_tramp_text_end - __entry_tramp_text_start;
|
||||
}
|
||||
|
||||
#endif /* __ASM_SECTIONS_H */
|
||||
|
@ -93,5 +93,9 @@ void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
|
||||
|
||||
enum mitigation_state arm64_get_meltdown_state(void);
|
||||
|
||||
enum mitigation_state arm64_get_spectre_bhb_state(void);
|
||||
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
|
||||
u8 spectre_bhb_loop_affected(int scope);
|
||||
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASM_SPECTRE_H */
|
||||
|
@ -773,6 +773,7 @@
|
||||
#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
|
||||
|
||||
/* id_aa64isar2 */
|
||||
#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
|
||||
#define ID_AA64ISAR2_RPRES_SHIFT 4
|
||||
#define ID_AA64ISAR2_WFXT_SHIFT 0
|
||||
|
||||
@ -904,6 +905,7 @@
|
||||
#endif
|
||||
|
||||
/* id_aa64mmfr1 */
|
||||
#define ID_AA64MMFR1_ECBHB_SHIFT 60
|
||||
#define ID_AA64MMFR1_AFP_SHIFT 44
|
||||
#define ID_AA64MMFR1_ETS_SHIFT 36
|
||||
#define ID_AA64MMFR1_TWED_SHIFT 32
|
||||
|
73
arch/arm64/include/asm/vectors.h
Normal file
@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022 ARM Ltd.
*/
#ifndef __ASM_VECTORS_H
#define __ASM_VECTORS_H

#include <linux/bug.h>
#include <linux/percpu.h>

#include <asm/fixmap.h>

extern char vectors[];
extern char tramp_vectors[];
extern char __bp_harden_el1_vectors[];

/*
* Note: the order of this enum corresponds to two arrays in entry.S:
* tramp_vecs and __bp_harden_el1_vectors. By default the canonical
* 'full fat' vectors are used directly.
*/
enum arm64_bp_harden_el1_vectors {
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
/*
* Perform the BHB loop mitigation, before branching to the canonical
* vectors.
*/
EL1_VECTOR_BHB_LOOP,

/*
* Make the SMC call for firmware mitigation, before branching to the
* canonical vectors.
*/
EL1_VECTOR_BHB_FW,

/*
* Use the ClearBHB instruction, before branching to the canonical
* vectors.
*/
EL1_VECTOR_BHB_CLEAR_INSN,
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */

/*
* Remap the kernel before branching to the canonical vectors.
*/
EL1_VECTOR_KPTI,
};

#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
#define EL1_VECTOR_BHB_LOOP -1
#define EL1_VECTOR_BHB_FW -1
#define EL1_VECTOR_BHB_CLEAR_INSN -1
#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */

/* The vectors to use on return from EL0. e.g. to remap the kernel */
DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);

#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_VALIAS 0
#endif

static inline const char *
arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
{
if (arm64_kernel_unmapped_at_el0())
return (char *)TRAMP_VALIAS + SZ_2K * slot;

WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);

return __bp_harden_el1_vectors + SZ_2K * slot;
}

#endif /* __ASM_VECTORS_H */
@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)

#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2

/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)

@@ -502,6 +502,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_spectre_v4,
.cpu_enable = spectre_v4_enable_mitigation,
},
{
.desc = "Spectre-BHB",
.capability = ARM64_SPECTRE_BHB,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = is_spectre_bhb_affected,
.cpu_enable = spectre_bhb_enable_mitigation,
},
#ifdef CONFIG_ARM64_ERRATUM_1418040
{
.desc = "ARM erratum 1418040",

@@ -73,6 +73,8 @@
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -85,6 +87,7 @@
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -110,6 +113,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;

/*
* Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
* support it?
@@ -226,6 +231,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1590,6 +1596,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)

int cpu = smp_processor_id();

if (__this_cpu_read(this_cpu_vector) == vectors) {
const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

__this_cpu_write(this_cpu_vector, v);
}

/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
@@ -37,18 +37,21 @@

.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.Lventry_start\@:
.if \el == 0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
/*
* This must be the first instruction of the EL0 vector entries. It is
* skipped by the trampoline vectors, to trigger the cleanup.
*/
b .Lskip_tramp_vectors_cleanup\@
.if \regsize == 64
mrs x30, tpidrro_el0
msr tpidrro_el0, xzr
.else
mov x30, xzr
.endif
alternative_else_nop_endif
.Lskip_tramp_vectors_cleanup\@:
.endif
#endif

sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
@@ -95,11 +98,15 @@ alternative_else_nop_endif
mrs x0, tpidrro_el0
#endif
b el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm

.macro tramp_alias, dst, sym
.macro tramp_alias, dst, sym, tmp
mov_q \dst, TRAMP_VALIAS
add \dst, \dst, #(\sym - .entry.tramp.text)
adr_l \tmp, \sym
add \dst, \dst, \tmp
adr_l \tmp, .entry.tramp.text
sub \dst, \dst, \tmp
.endm

/*
@@ -116,7 +123,7 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
alternative_cb spectre_v4_patch_fw_mitigation_conduit
alternative_cb smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
@@ -413,21 +420,26 @@ alternative_else_nop_endif
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp

.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp
eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
msr far_el1, x29
tramp_alias x30, tramp_exit_native, x29
br x30
4:
tramp_alias x30, tramp_exit_compat
tramp_alias x30, tramp_exit_compat, x29
br x30
#endif
.else
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp

/* Ensure any device/NC reads complete */
alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

@@ -594,12 +606,6 @@ SYM_CODE_END(ret_to_user)

.popsection // .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
* Exception vectors trampoline.
*/
.pushsection ".entry.tramp.text", "ax"

// Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
@@ -633,12 +639,47 @@ alternative_else_nop_endif
*/
.endm

.macro tramp_ventry, regsize = 64
.macro tramp_data_page dst
adr_l \dst, .entry.tramp.text
sub \dst, \dst, PAGE_SIZE
.endm

.macro tramp_data_read_var dst, var
#ifdef CONFIG_RANDOMIZE_BASE
tramp_data_page \dst
add \dst, \dst, #:lo12:__entry_tramp_data_\var
ldr \dst, [\dst]
#else
ldr \dst, =\var
#endif
.endm

#define BHB_MITIGATION_NONE 0
#define BHB_MITIGATION_LOOP 1
#define BHB_MITIGATION_FW 2
#define BHB_MITIGATION_INSN 3

.macro tramp_ventry, vector_start, regsize, kpti, bhb
.align 7
1:
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif

.if \bhb == BHB_MITIGATION_LOOP
/*
* This sequence must appear before the first indirect branch. i.e. the
* ret out of tramp_ventry. It appears here because x30 is free.
*/
__mitigate_spectre_bhb_loop x30
.endif // \bhb == BHB_MITIGATION_LOOP

.if \bhb == BHB_MITIGATION_INSN
clearbhb
isb
.endif // \bhb == BHB_MITIGATION_INSN

.if \kpti == 1
/*
* Defend against branch aliasing attacks by pushing a dummy
* entry onto the return stack and using a RET instruction to
@@ -648,46 +689,75 @@ alternative_else_nop_endif
b .
2:
tramp_map_kernel x30
#ifdef CONFIG_RANDOMIZE_BASE
adr x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
ldr x30, [x30]
#else
ldr x30, =vectors
#endif
tramp_data_read_var x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
prfm plil1strm, [x30, #(1b - tramp_vectors)]
prfm plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

msr vbar_el1, x30
add x30, x30, #(1b - tramp_vectors)
isb
.else
ldr x30, =vectors
.endif // \kpti == 1

.if \bhb == BHB_MITIGATION_FW
/*
* The firmware sequence must appear before the first indirect branch.
* i.e. the ret out of tramp_ventry. But it also needs the stack to be
* mapped to save/restore the registers the SMC clobbers.
*/
__mitigate_spectre_bhb_fw
.endif // \bhb == BHB_MITIGATION_FW

add x30, x30, #(1b - \vector_start + 4)
ret
.org 1b + 128 // Did we overflow the ventry slot?
.endm

.macro tramp_exit, regsize = 64
adr x30, tramp_vectors
tramp_data_read_var x30, this_cpu_vector
get_this_cpu_offset x29
ldr x30, [x30, x29]

msr vbar_el1, x30
tramp_unmap_kernel x30
ldr lr, [sp, #S_LR]
tramp_unmap_kernel x29
.if \regsize == 64
mrs x30, far_el1
mrs x29, far_el1
.endif
add sp, sp, #PT_REGS_SIZE // restore sp
eret
sb
.endm

.align 11
SYM_CODE_START_NOALIGN(tramp_vectors)
.macro generate_tramp_vector, kpti, bhb
.Lvector_start\@:
.space 0x400

tramp_ventry
tramp_ventry
tramp_ventry
tramp_ventry
.rept 4
tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
.endr
.rept 4
tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
.endr
.endm

tramp_ventry 32
tramp_ventry 32
tramp_ventry 32
tramp_ventry 32
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
* Exception vectors trampoline.
* The order must match __bp_harden_el1_vectors and the
* arm64_bp_harden_el1_vectors enum.
*/
.pushsection ".entry.tramp.text", "ax"
.align 11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
@@ -704,12 +774,56 @@ SYM_CODE_END(tramp_exit_compat)
.pushsection ".rodata", "a"
.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
__entry_tramp_data_vectors:
.quad vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
.quad __sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
.quad this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
* Exception vectors for spectre mitigations on entry from EL1 when
* kpti is not in use.
*/
.macro generate_el1_vector, bhb
.Lvector_start\@:
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
kernel_ventry 1, t, 64, fiq // FIQ EL1h
kernel_ventry 1, t, 64, error // Error EL1t

kernel_ventry 1, h, 64, sync // Synchronous EL1h
kernel_ventry 1, h, 64, irq // IRQ EL1h
kernel_ventry 1, h, 64, fiq // FIQ EL1h
kernel_ventry 1, h, 64, error // Error EL1h

.rept 4
tramp_ventry .Lvector_start\@, 64, 0, \bhb
.endr
.rept 4
tramp_ventry .Lvector_start\@, 32, 0, \bhb
.endr
.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
.pushsection ".entry.text", "ax"
.align 11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_el1_vector bhb=BHB_MITIGATION_LOOP
generate_el1_vector bhb=BHB_MITIGATION_FW
generate_el1_vector bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
.popsection

/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
@@ -835,14 +949,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
* Remember whether to unmap the kernel on exit.
*/
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]

#ifdef CONFIG_RANDOMIZE_BASE
adr x4, tramp_vectors + PAGE_SIZE
add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
ldr x4, [x4]
#else
ldr x4, =__sdei_asm_handler
#endif
tramp_data_read_var x4, __sdei_asm_handler
br x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
@@ -865,13 +972,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
SYM_DATA_START(__sdei_asm_trampoline_next_handler)
.quad __sdei_asm_handler
SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
@@ -981,7 +1081,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
br x5
#endif
SYM_CODE_END(__sdei_asm_handler)
@@ -66,6 +66,10 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);

/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
@@ -18,15 +18,18 @@
*/

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void)
return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
switch (bhb_state) {
case SPECTRE_UNAFFECTED:
return "";
default:
case SPECTRE_VULNERABLE:
return ", but not BHB";
case SPECTRE_MITIGATED:
return ", BHB";
}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
return !sysctl_unprivileged_bpf_disabled;
#else
return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
char *buf)
{
enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
const char *bhb_str = get_bhb_affected_string(bhb_state);
const char *v2_str = "Branch predictor hardening";

switch (spectre_v2_state) {
case SPECTRE_UNAFFECTED:
return sprintf(buf, "Not affected\n");
if (bhb_state == SPECTRE_UNAFFECTED)
return sprintf(buf, "Not affected\n");

/*
* Platforms affected by Spectre-BHB can't report
* "Not affected" for Spectre-v2.
*/
v2_str = "CSV2";
fallthrough;
case SPECTRE_MITIGATED:
return sprintf(buf, "Mitigation: Branch predictor hardening\n");
if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
case SPECTRE_VULNERABLE:
fallthrough;
default:
@@ -554,9 +594,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
* Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
* to call into firmware to adjust the mitigation state.
*/
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
u32 insn;

@@ -770,3 +810,344 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
return -ENODEV;
}
}

/*
* Spectre BHB.
*
* A CPU is either:
* - Mitigated by a branchy loop a CPU specific number of times, and listed
* in our "loop mitigated list".
* - Mitigated in software by the firmware Spectre v2 call.
* - Has the ClearBHB instruction to perform the mitigation.
* - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
* software mitigation in the vectors is needed.
* - Has CSV2.3, so is unaffected.
*/
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
return spectre_bhb_state;
}

enum bhb_mitigation_bits {
BHB_LOOP,
BHB_FW,
BHB_HW,
BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
u8 spectre_bhb_loop_affected(int scope)
{
u8 k = 0;
static u8 max_bhb_k;

if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

max_bhb_k = max(max_bhb_k, k);
} else {
k = max_bhb_k;
}

return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
int ret;
struct arm_smccc_res res;

arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_3, &res);

ret = res.a0;
switch (ret) {
case SMCCC_RET_SUCCESS:
return SPECTRE_MITIGATED;
case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
return SPECTRE_UNAFFECTED;
default:
fallthrough;
case SMCCC_RET_NOT_SUPPORTED:
return SPECTRE_VULNERABLE;
}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
spectre_bhb_firmware_mitigated_list);

if (scope != SCOPE_LOCAL_CPU)
return system_affected;

fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
system_affected = true;
return true;
}

return false;
}

static bool supports_ecbhb(int scope)
{
u64 mmfr1;

if (scope == SCOPE_LOCAL_CPU)
mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
else
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

if (supports_csv2p3(scope))
return false;

if (supports_clearbhb(scope))
return true;

if (spectre_bhb_loop_affected(scope))
return true;

if (is_spectre_bhb_fw_affected(scope))
return true;

return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);

if (slot < 0)
return;

__this_cpu_write(this_cpu_vector, v);

/*
* When KPTI is in use, the vectors are switched when exiting to
* user-space.
*/
if (arm64_kernel_unmapped_at_el0())
return;

write_sysreg(v, vbar_el1);
isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
return;

if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
/* No point mitigating Spectre-BHB alone. */
} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
} else if (cpu_mitigations_off()) {
pr_info_once("spectre-bhb mitigation disabled by command line option\n");
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
set_bit(BHB_HW, &system_bhb_mitigations);
} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
/*
* Ensure KVM uses the indirect vector which will have ClearBHB
* added.
*/
if (!data->slot)
data->slot = HYP_VECTOR_INDIRECT;

this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
* the spectre-indirect vector, which is sufficient for BHB
* too.
*/
if (!data->slot)
data->slot = HYP_VECTOR_INDIRECT;

this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;

this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);

state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}
}

update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1);

if (test_bit(BHB_LOOP, &system_bhb_mitigations))
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
__le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1);

if (test_bit(BHB_FW, &system_bhb_mitigations))
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
u8 rd;
u32 insn;
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

BUG_ON(nr_inst != 1); /* MOV -> MOV */

if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
return;

insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
u8 rd;
u32 insn;

BUG_ON(nr_inst != 1); /* MOV -> MOV */

if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
!test_bit(BHB_FW, &system_bhb_mitigations))
return;

insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
AARCH64_INSN_VARIANT_32BIT,
AARCH64_INSN_REG_ZR, rd,
ARM_SMCCC_ARCH_WORKAROUND_3);
if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
return;

*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 2);

if (test_bit(BHB_INSN, &system_bhb_mitigations))
return;

*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
if (spectre_v2_state == SPECTRE_VULNERABLE ||
spectre_bhb_state != SPECTRE_MITIGATED)
return;

if (!new_state)
pr_err("WARNING: %s", EBPF_WARN);
}
#endif
@@ -341,7 +341,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
"Entry trampoline text too big")
#endif
#ifdef CONFIG_KVM
@@ -1491,10 +1491,7 @@ static int kvm_init_vector_slots(void)
base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);

if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
return 0;

if (!has_vhe()) {
if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
__BP_HARDEN_HYP_VECS_SZ, &base);
if (err)

@@ -62,6 +62,10 @@ el1_sync: // Guest trapped into EL2
/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
ARM_SMCCC_ARCH_WORKAROUND_2)
cbz w1, wa_epilogue

eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
ARM_SMCCC_ARCH_WORKAROUND_3)
cbnz w1, el1_trap

wa_epilogue:
@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector)
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
alternative_cb spectre_bhb_patch_wa3
/* Patched to mov WA3 when supported */
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
alternative_cb_end
smc #0
ldp x2, x3, [sp, #(8 * 0)]
add sp, sp, #(8 * 2)
@@ -205,6 +212,8 @@ SYM_CODE_END(__kvm_hyp_vector)
spectrev2_smccc_wa1_smc
.else
stp x0, x1, [sp, #-16]!
mitigate_spectre_bhb_loop x0
mitigate_spectre_bhb_clear_insn
.endif
.if \indirect != 0
alternative_cb kvm_patch_vector_branch

@@ -148,8 +148,10 @@ int hyp_map_vectors(void)
phys_addr_t phys;
void *bp_base;

if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
if (!kvm_system_needs_idmapped_vectors()) {
__hyp_bp_vect_base = __bp_harden_hyp_vecs;
return 0;
}

phys = __hyp_pa(__bp_harden_hyp_vecs);
bp_base = (void *)__pkvm_create_private_mapping(phys,

@@ -10,6 +10,7 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>
@@ -24,6 +25,8 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -67,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps);

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char vectors[]; /* kernel exception vectors */
const char *host_vectors = vectors;

___deactivate_traps(vcpu);

@@ -81,7 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);

if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);

@@ -107,6 +107,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
break;
}
break;
case ARM_SMCCC_ARCH_WORKAROUND_3:
switch (arm64_get_spectre_bhb_state()) {
case SPECTRE_VULNERABLE:
break;
case SPECTRE_MITIGATED:
val[0] = SMCCC_RET_SUCCESS;
break;
case SPECTRE_UNAFFECTED:
val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
break;
}
break;
case ARM_SMCCC_HV_PV_TIME_FEATURES:
val[0] = SMCCC_RET_SUCCESS;
break;

@@ -405,7 +405,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
return 3; /* PSCI version and two workaround registers */
return 4; /* PSCI version and three workaround registers */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -419,6 +419,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
return -EFAULT;

if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
return -EFAULT;

return 0;
}

@@ -458,6 +461,17 @@ static int get_kernel_wa_level(u64 regid)
case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
}
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
switch (arm64_get_spectre_bhb_state()) {
case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
case SPECTRE_MITIGATED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
}
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
}

return -EINVAL;
@@ -474,6 +488,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
break;
default:
@@ -519,6 +534,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}

case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
return -EINVAL;

@@ -7,8 +7,10 @@

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

/*
@@ -38,3 +40,18 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
/*
* With Enhanced PAN we can honour the execute-only permissions as
* there is no PAN override with such mappings.
*/
if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
protection_map[VM_EXEC] = PAGE_EXECONLY;
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}

return 0;
}
arch_initcall(adjust_protection_map);

@@ -617,6 +617,8 @@ early_param("rodata", parse_rodata);
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;

pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

@@ -625,11 +627,15 @@ static int __init map_entry_trampoline(void)

/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
prot, __pgd_pgtable_alloc, 0);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
entry_tramp_text_size(), prot,
__pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

/* Map both the text and data into the kernel page table */
__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
pa_start + i * PAGE_SIZE, prot);

if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern char __entry_tramp_data_start[];

@@ -44,6 +44,7 @@ MTE_ASYMM
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
SPECTRE_BHB
SSBS
SVE
UNMAP_KERNEL_AT_EL0
@@ -202,7 +202,6 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
/*
* The current system page and segment sizes
*/
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
@@ -213,6 +212,7 @@ extern int mmu_io_psize;
#define mmu_virtual_psize MMU_PAGE_4K
#endif
#endif
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;

/* MMU initialization */

@@ -9,7 +9,7 @@ struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
int add_tce_mem_ranges(struct crash_mem **mem_ranges);
int add_initrd_mem_range(struct crash_mem **mem_ranges);
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
int add_htab_mem_range(struct crash_mem **mem_ranges);
#else
static inline int add_htab_mem_range(struct crash_mem **mem_ranges)

@@ -113,7 +113,8 @@
compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
reg = <0xC000000 0x4000000>;
interrupt-controller;
interrupts-extended = <&cpu0_intc 11>, <&cpu1_intc 11>;
interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
<&cpu1_intc 11>, <&cpu1_intc 9>;
riscv,ndev = <65>;
};

@@ -119,7 +119,7 @@ extern phys_addr_t phys_ram_base;
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x) \
((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
@@ -13,6 +13,7 @@

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
#define KERN_VIRT_SIZE (UL(-1))
#else

#define ADDRESS_SPACE_END (UL(-1))

@@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN) += kasan_init.o
ifdef CONFIG_KASAN
KASAN_SANITIZE_kasan_init.o := n
KASAN_SANITIZE_init.o := n
ifdef CONFIG_DEBUG_VIRTUAL
KASAN_SANITIZE_physaddr.o := n
endif
endif

obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o

@@ -125,7 +125,6 @@ void __init mem_init(void)
else
swiotlb_force = SWIOTLB_NO_FORCE;
#endif
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
memblock_free_all();

print_vm_layout();
@@ -195,6 +194,7 @@ static void __init setup_bootmem(void)

min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));

dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

@@ -113,8 +113,11 @@ static void __init kasan_populate_pud(pgd_t *pgd,
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
} else {
base_pud = (pud_t *)pgd_page_vaddr(*pgd);
if (base_pud == lm_alias(kasan_early_shadow_pud))
if (base_pud == lm_alias(kasan_early_shadow_pud)) {
base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
memcpy(base_pud, (void *)kasan_early_shadow_pud,
sizeof(pud_t) * PTRS_PER_PUD);
}
}

pudp = base_pud + pud_index(vaddr);
@@ -202,8 +205,7 @@ asmlinkage void __init kasan_early_init(void)

for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_early_shadow_pte + i,
mk_pte(virt_to_page(kasan_early_shadow_page),
PAGE_KERNEL));
pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

for (i = 0; i < PTRS_PER_PMD; ++i)
set_pmd(kasan_early_shadow_pmd + i,

@@ -8,12 +8,10 @@

phys_addr_t __virt_to_phys(unsigned long x)
{
phys_addr_t y = x - PAGE_OFFSET;

/*
* Boundary checking aginst the kernel linear mapping space.
*/
WARN(y >= KERN_VIRT_SIZE,
WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x, (void *)x);

@@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
{
a->fixup = b->fixup + delta;
b->fixup = tmp.fixup - delta;
a->handler = b->handler + delta;
b->handler = tmp.handler - delta;
a->handler = b->handler;
if (a->handler)
a->handler += delta;
b->handler = tmp.handler;
if (b->handler)
b->handler -= delta;
}
#define swap_ex_entry_fixup swap_ex_entry_fixup

#endif

@@ -47,15 +47,17 @@ struct ftrace_regs {

static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
return &fregs->regs;
struct pt_regs *regs = &fregs->regs;

if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
return regs;
return NULL;
}

static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
unsigned long ip)
{
struct pt_regs *regs = arch_ftrace_get_regs(fregs);

regs->psw.addr = ip;
fregs->regs.psw.addr = ip;
}

/*

@@ -15,11 +15,13 @@
#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */

#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)

#ifndef __ASSEMBLY__

@@ -159,9 +159,38 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return 0;
}

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_insn insn;
s64 disp;
u16 opc;

if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
return ERR_PTR(-EFAULT);
disp = (s64)insn.disp * 2;
trampoline = (void *)(rec->ip + disp);
if (get_kernel_nofault(opc, &trampoline->brasl_opc))
return ERR_PTR(-EFAULT);
if (opc != 0xc015)
return ERR_PTR(-EINVAL);
return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
u64 old;

trampoline = ftrace_get_trampoline(rec);
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
if (get_kernel_nofault(old, &trampoline->interceptor))
return -EFAULT;
if (old != old_addr)
return -EINVAL;
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
return 0;
}

@@ -188,6 +217,12 @@ static void brcl_enable(void *brcl)

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;

trampoline = ftrace_get_trampoline(rec);
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
brcl_enable((void *)rec->ip);
return 0;
}
@@ -291,7 +326,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,

regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
if (!regs || unlikely(!p) || kprobe_disabled(p))
goto out;

if (kprobe_running()) {

@@ -27,6 +27,7 @@ ENDPROC(ftrace_stub)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
#ifdef __PACK_STACK
/* allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
@@ -57,6 +58,14 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
stosm (STACK_PTREGS_PSW)(%r15),0
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
#else
lghi %r14,_PIF_FTRACE_FULL_REGS
stg %r14,STACK_PTREGS_FLAGS(%r15)
#endif
.else
xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
.endif

lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address

@@ -800,6 +800,8 @@ static void __init check_initrd(void)
static void __init reserve_kernel(void)
{
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
memblock_reserve(__amode31_base, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
@ -204,7 +204,7 @@
|
||||
/* FREE! ( 7*32+10) */
|
||||
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
|
||||
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
|
||||
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
|
||||
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
|
||||
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
||||
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
|
||||
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
|
||||
|
@ -84,7 +84,7 @@
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
|
||||
__stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
|
||||
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
|
||||
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
|
||||
#else
|
||||
jmp *%\reg
|
||||
#endif
|
||||
@ -94,7 +94,7 @@
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
|
||||
__stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
|
||||
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
|
||||
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
|
||||
#else
|
||||
call *%\reg
|
||||
#endif
|
||||
@ -146,7 +146,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
|
||||
"lfence;\n" \
|
||||
ANNOTATE_RETPOLINE_SAFE \
|
||||
"call *%[thunk_target]\n", \
|
||||
X86_FEATURE_RETPOLINE_AMD)
|
||||
X86_FEATURE_RETPOLINE_LFENCE)
|
||||
|
||||
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
|
||||
|
||||
@ -176,7 +176,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
|
||||
"lfence;\n" \
|
||||
ANNOTATE_RETPOLINE_SAFE \
|
||||
"call *%[thunk_target]\n", \
|
||||
X86_FEATURE_RETPOLINE_AMD)
|
||||
X86_FEATURE_RETPOLINE_LFENCE)
|
||||
|
||||
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
|
||||
#endif
|
||||
@ -188,9 +188,11 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
|
||||
/* The Spectre V2 mitigation variants */
|
||||
enum spectre_v2_mitigation {
|
||||
SPECTRE_V2_NONE,
|
||||
SPECTRE_V2_RETPOLINE_GENERIC,
|
||||
SPECTRE_V2_RETPOLINE_AMD,
|
||||
SPECTRE_V2_IBRS_ENHANCED,
|
||||
SPECTRE_V2_RETPOLINE,
|
||||
SPECTRE_V2_LFENCE,
|
||||
SPECTRE_V2_EIBRS,
|
||||
SPECTRE_V2_EIBRS_RETPOLINE,
|
||||
SPECTRE_V2_EIBRS_LFENCE,
|
||||
};
|
||||
|
||||
/* The indirect branch speculation control variants */
|
||||
|
@ -389,7 +389,7 @@ static int emit_indirect(int op, int reg, u8 *bytes)
|
||||
*
|
||||
* CALL *%\reg
|
||||
*
|
||||
* It also tries to inline spectre_v2=retpoline,amd when size permits.
|
||||
* It also tries to inline spectre_v2=retpoline,lfence when size permits.
|
||||
*/
|
||||
static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
|
||||
{
|
||||
@ -407,7 +407,7 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
|
||||
BUG_ON(reg == 4);
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
|
||||
!cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD))
|
||||
!cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
|
||||
return -1;
|
||||
|
||||
op = insn->opcode.bytes[0];
|
||||
@ -438,9 +438,9 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
|
||||
}
|
||||
|
||||
/*
|
||||
* For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE.
|
||||
* For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
|
||||
*/
|
||||
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
|
||||
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
|
||||
bytes[i++] = 0x0f;
|
||||
bytes[i++] = 0xae;
|
||||
bytes[i++] = 0xe8; /* LFENCE */
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/sched/smt.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/bpf.h>
|
||||
|
||||
#include <asm/spec-ctrl.h>
|
||||
#include <asm/cmdline.h>
|
||||
@ -650,6 +651,32 @@ static inline const char *spectre_v2_module_string(void)
|
||||
static inline const char *spectre_v2_module_string(void) { return ""; }
|
||||
#endif
|
||||
|
||||
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
|
||||
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
|
||||
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
void unpriv_ebpf_notify(int new_state)
|
||||
{
|
||||
if (new_state)
|
||||
return;
|
||||
|
||||
/* Unprivileged eBPF is enabled */
|
||||
|
||||
switch (spectre_v2_enabled) {
|
||||
case SPECTRE_V2_EIBRS:
|
||||
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
|
||||
break;
|
||||
case SPECTRE_V2_EIBRS_LFENCE:
|
||||
if (sched_smt_active())
|
||||
pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||

 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
 	int len = strlen(opt);
@ -664,7 +691,10 @@ enum spectre_v2_mitigation_cmd {
 	SPECTRE_V2_CMD_FORCE,
 	SPECTRE_V2_CMD_RETPOLINE,
 	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-	SPECTRE_V2_CMD_RETPOLINE_AMD,
+	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
+	SPECTRE_V2_CMD_EIBRS,
+	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
+	SPECTRE_V2_CMD_EIBRS_LFENCE,
 };

 enum spectre_v2_user_cmd {
@ -737,6 +767,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 	return SPECTRE_V2_USER_CMD_AUTO;
 }

+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+	return (mode == SPECTRE_V2_EIBRS ||
+		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+		mode == SPECTRE_V2_EIBRS_LFENCE);
+}
+
 static void __init
 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
@ -804,7 +841,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 		return;

 	/*
@ -824,9 +861,11 @@ set_mode:

 static const char * const spectre_v2_strings[] = {
 	[SPECTRE_V2_NONE]			= "Vulnerable",
-	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
-	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
-	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
+	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
+	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
+	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
+	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
+	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
 };

 static const struct {
@ -837,8 +876,12 @@ static const struct {
 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
-	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
+	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
 	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
+	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
+	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
 };
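Note: mitigation_options[] above drives spectre_v2= cmdline parsing; the legacy "retpoline,amd" spelling is deliberately kept as an alias for the new LFENCE command. A compilable sketch of the same table-driven match (names below are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

enum cmd { CMD_AUTO, CMD_RETPOLINE_LFENCE, CMD_EIBRS_LFENCE };

static const struct {
	const char *option;
	enum cmd cmd;
} options[] = {
	{ "retpoline,amd",    CMD_RETPOLINE_LFENCE },	/* legacy alias */
	{ "retpoline,lfence", CMD_RETPOLINE_LFENCE },
	{ "eibrs,lfence",     CMD_EIBRS_LFENCE },
};

static enum cmd parse(const char *arg, int arglen)
{
	unsigned int i;

	for (i = 0; i < sizeof(options) / sizeof(options[0]); i++) {
		int len = strlen(options[i].option);

		/* match_option()-style length-bounded comparison */
		if (arglen == len && !strncmp(arg, options[i].option, len))
			return options[i].cmd;
	}
	return CMD_AUTO;
}

int main(void)
{
	const char *arg = "retpoline,amd";

	printf("cmd = %d\n", parse(arg, strlen(arg)));
	return 0;
}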
@ -875,10 +918,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	}

 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
-	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
-	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
 	    !IS_ENABLED(CONFIG_RETPOLINE)) {
-		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
 		return SPECTRE_V2_CMD_AUTO;
 	}

+	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
+	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
+	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
@ -887,6 +950,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	return cmd;
 }

+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+{
+	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+		pr_err("Kernel not compiled with retpoline; no mitigation available!");
+		return SPECTRE_V2_NONE;
+	}
+
+	return SPECTRE_V2_RETPOLINE;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@ -907,49 +980,64 @@ static void __init spectre_v2_select_mitigation(void)
 	case SPECTRE_V2_CMD_FORCE:
 	case SPECTRE_V2_CMD_AUTO:
 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
-			mode = SPECTRE_V2_IBRS_ENHANCED;
-			/* Force it so VMEXIT will restore correctly */
-			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-			goto specv2_set_mode;
+			mode = SPECTRE_V2_EIBRS;
+			break;
 		}
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_auto;
+
+		mode = spectre_v2_select_retpoline();
 		break;
-	case SPECTRE_V2_CMD_RETPOLINE_AMD:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_amd;
+
+	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
+		pr_err(SPECTRE_V2_LFENCE_MSG);
+		mode = SPECTRE_V2_LFENCE;
 		break;
+
 	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_generic;
+		mode = SPECTRE_V2_RETPOLINE;
 		break;
+
 	case SPECTRE_V2_CMD_RETPOLINE:
-		if (IS_ENABLED(CONFIG_RETPOLINE))
-			goto retpoline_auto;
+		mode = spectre_v2_select_retpoline();
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS:
+		mode = SPECTRE_V2_EIBRS;
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS_LFENCE:
+		mode = SPECTRE_V2_EIBRS_LFENCE;
+		break;
+
+	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
+		mode = SPECTRE_V2_EIBRS_RETPOLINE;
 		break;
 	}
-	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
-	return;

-retpoline_auto:
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
-	retpoline_amd:
-		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
-			goto retpoline_generic;
-		}
-		mode = SPECTRE_V2_RETPOLINE_AMD;
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-	} else {
-	retpoline_generic:
-		mode = SPECTRE_V2_RETPOLINE_GENERIC;
-		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
+
+	if (spectre_v2_in_eibrs_mode(mode)) {
+		/* Force it so VMEXIT will restore correctly */
+		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+	}
+
+	switch (mode) {
+	case SPECTRE_V2_NONE:
+	case SPECTRE_V2_EIBRS:
+		break;
+
+	case SPECTRE_V2_LFENCE:
+	case SPECTRE_V2_EIBRS_LFENCE:
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
+		fallthrough;
+
+	case SPECTRE_V2_RETPOLINE:
+	case SPECTRE_V2_EIBRS_RETPOLINE:
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+		break;
 	}

-specv2_set_mode:
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);

@ -975,7 +1063,7 @@ specv2_set_mode:
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
+	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
@ -1045,6 +1133,10 @@ void cpu_bugs_smt_update(void)
 {
 	mutex_lock(&spec_ctrl_mutex);

+	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
 	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
 		break;
@ -1684,7 +1776,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)

 static char *stibp_state(void)
 {
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
 		return "";

 	switch (spectre_v2_user_stibp) {
@ -1714,6 +1806,27 @@ static char *ibpb_state(void)
 	return "";
 }

+static ssize_t spectre_v2_show_state(char *buf)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+		return sprintf(buf, "Vulnerable: LFENCE\n");
+
+	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
+		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
+
+	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+	return sprintf(buf, "%s%s%s%s%s%s\n",
+		       spectre_v2_strings[spectre_v2_enabled],
+		       ibpb_state(),
+		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+		       stibp_state(),
+		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+		       spectre_v2_module_string());
+}
+
 static ssize_t srbds_show_state(char *buf)
 {
 	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
@ -1739,12 +1852,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

 	case X86_BUG_SPECTRE_V2:
-		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-			       ibpb_state(),
-			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-			       stibp_state(),
-			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
-			       spectre_v2_module_string());
+		return spectre_v2_show_state(buf);

 	case X86_BUG_SPEC_STORE_BYPASS:
 		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
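Note: spectre_v2_show_state() assembles the sysfs vulnerabilities line: the newly "Vulnerable" combinations are special-cased first, then the mode string is concatenated with optional suffixes. A rough userspace model (flags, strings, and the function name below are stand-ins):

#include <stdio.h>
#include <stdbool.h>

enum v2_mode { V2_LFENCE, V2_EIBRS, V2_EIBRS_LFENCE, V2_RETPOLINE };

static int show_state(char *buf, size_t len, enum v2_mode mode,
		      bool unpriv_ebpf, bool smt, bool ibrs_fw, bool rsb)
{
	if (mode == V2_LFENCE)
		return snprintf(buf, len, "Vulnerable: LFENCE\n");
	if (mode == V2_EIBRS && unpriv_ebpf)
		return snprintf(buf, len, "Vulnerable: eIBRS with unprivileged eBPF\n");
	if (mode == V2_EIBRS_LFENCE && unpriv_ebpf && smt)
		return snprintf(buf, len,
			"Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	/* otherwise: mode string plus optional ", IBRS_FW"/", RSB filling" */
	return snprintf(buf, len, "%s%s%s\n",
			mode == V2_RETPOLINE ? "Mitigation: Retpolines"
					     : "Mitigation: Enhanced IBRS",
			ibrs_fw ? ", IBRS_FW" : "",
			rsb ? ", RSB filling" : "");
}

int main(void)
{
	char buf[128];

	show_state(buf, sizeof(buf), V2_RETPOLINE, false, true, true, true);
	fputs(buf, stdout);
	return 0;
}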
arch/x86/kernel/kvm.c

@ -463,6 +463,7 @@ static bool pv_tlb_flush_supported(void)
 	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }

@ -477,6 +478,7 @@ static bool pv_sched_yield_supported(void)
 	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }

@ -622,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)

 	/* Make sure other vCPUs get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
-		if (vcpu_is_preempted(cpu)) {
+		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
 			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
 			break;
 		}
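Note: the kvm_smp_send_call_func_ipi() tweak skips yield targets that are preempted but idle, since boosting an idle vCPU does no useful work for the pending IPI. A toy illustration with made-up per-CPU state:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

static bool idle[NR_CPUS]      = { false, true,  false, false };
static bool preempted[NR_CPUS] = { false, true,  true,  false };

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* skip cpu 1: preempted but idle; pick cpu 2 instead */
		if (!idle[cpu] && preempted[cpu]) {
			printf("yield to vCPU %d\n", cpu);  /* KVM_HC_SCHED_YIELD */
			break;
		}
	}
	return 0;
}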
arch/x86/kernel/kvmclock.c

@ -239,6 +239,9 @@ static void __init kvmclock_init_mem(void)

 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
+	if (!kvm_para_available() || !kvmclock)
+		return 0;
+
 	kvmclock_init_mem();

 #ifdef CONFIG_X86_64
arch/x86/kvm/mmu/mmu.c

@ -3565,7 +3565,7 @@ set_root_pgd:
 out_unlock:
 	write_unlock(&vcpu->kvm->mmu_lock);

-	return 0;
+	return r;
 }

 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c

@ -9180,6 +9180,7 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 		likely(!pic_in_kernel(vcpu->kvm));
 }

+/* Called within kvm->srcu read side. */
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
@ -9188,16 +9189,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);

-	/*
-	 * The call to kvm_ready_for_interrupt_injection() may end up in
-	 * kvm_xen_has_interrupt() which may require the srcu lock to be
-	 * held, to protect against changes in the vcpu_info address.
-	 */
-	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_run->ready_for_interrupt_injection =
 		pic_in_kernel(vcpu->kvm) ||
 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

 	if (is_smm(vcpu))
 		kvm_run->flags |= KVM_RUN_X86_SMM;
@ -9815,6 +9809,7 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);

 /*
+ * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
  * userspace.
@ -10193,6 +10188,7 @@ out:
 	return r;
 }

+/* Called within kvm->srcu read side. */
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
 	bool hv_timer;
@ -10252,12 +10248,12 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 		!vcpu->arch.apf.halted);
 }

+/* Called within kvm->srcu read side. */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct kvm *kvm = vcpu->kvm;

-	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	vcpu->arch.l1tf_flush_l1d = true;

 	for (;;) {
@ -10285,14 +10281,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 		if (__xfer_to_guest_mode_work_pending()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			r = xfer_to_guest_mode_handle_work(vcpu);
+			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 			if (r)
 				return r;
-			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}

-	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-
 	return r;
 }

@ -10398,6 +10392,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
+	struct kvm *kvm = vcpu->kvm;
 	int r;

 	vcpu_load(vcpu);
@ -10405,6 +10400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	kvm_run->flags = 0;
 	kvm_load_guest_fpu(vcpu);

+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		if (kvm_run->immediate_exit) {
 			r = -EINTR;
@ -10415,7 +10411,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 * use before KVM has ever run the vCPU.
 		 */
 		WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 		kvm_vcpu_block(vcpu);
+		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
 		if (kvm_apic_accept_events(vcpu) < 0) {
 			r = 0;
 			goto out;
@ -10475,8 +10475,9 @@ out:
 	if (kvm_run->kvm_valid_regs)
 		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
-	kvm_sigset_deactivate(vcpu);
+	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);

+	kvm_sigset_deactivate(vcpu);
 	vcpu_put(vcpu);
 	return r;
 }
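Note: the x86.c hunks pull the kvm->srcu read side up into kvm_arch_vcpu_ioctl_run() and drop it only around points that may sleep (kvm_vcpu_block(), xfer_to_guest_mode_handle_work()). A stub sketch of that locking shape (the srcu_* functions below are fakes for illustration, not the kernel's SRCU API):

#include <stdio.h>

static int srcu_read_lock_stub(void) { puts("srcu lock"); return 0; }
static void srcu_read_unlock_stub(int idx) { (void)idx; puts("srcu unlock"); }
static void blocking_work(void) { puts("  blocking work (lock dropped)"); }
static int guest_iteration(int n) { printf("  run iteration %d\n", n); return n < 2; }

int main(void)
{
	int idx = srcu_read_lock_stub();	/* taken once, at ioctl entry */
	int n = 0;

	while (guest_iteration(n++)) {
		srcu_read_unlock_stub(idx);	/* drop only around sleeps */
		blocking_work();
		idx = srcu_read_lock_stub();
	}

	srcu_read_unlock_stub(idx);		/* released at ioctl exit */
	return 0;
}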
arch/x86/lib/retpoline.S

@ -34,7 +34,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)

	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
-		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_AMD
+		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE

 .endm
arch/x86/net/bpf_jit_comp.c

@ -390,7 +390,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 	u8 *prog = *pprog;

 #ifdef CONFIG_RETPOLINE
-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
+	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		EMIT_LFENCE();
		EMIT2(0xFF, 0xE0 + reg);
	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
block/blk-mq.c

@ -2718,7 +2718,8 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,

 static struct request *blk_mq_get_new_requests(struct request_queue *q,
					       struct blk_plug *plug,
-					       struct bio *bio)
+					       struct bio *bio,
+					       unsigned int nsegs)
 {
 	struct blk_mq_alloc_data data = {
		.q = q,
@ -2730,6 +2731,11 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	if (unlikely(bio_queue_enter(bio)))
 		return NULL;

+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		goto queue_exit;
+
+	rq_qos_throttle(q, bio);
+
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
 		plug->nr_ios = 1;
@ -2742,12 +2748,13 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
+queue_exit:
 	blk_queue_exit(q);
 	return NULL;
 }

 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio *bio)
+		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;

@ -2757,12 +2764,19 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	if (!rq || rq->q != q)
 		return NULL;

-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+		*bio = NULL;
 		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+	}
+
+	rq_qos_throttle(q, *bio);
+
+	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;

-	rq->cmd_flags = bio->bi_opf;
+	rq->cmd_flags = (*bio)->bi_opf;
 	plug->cached_rq = rq_list_next(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
 	return rq;
@ -2800,14 +2814,11 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;

-	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
-		return;
-
-	rq_qos_throttle(q, bio);
-
-	rq = blk_mq_get_cached_request(q, plug, bio);
+	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
 	if (!rq) {
-		rq = blk_mq_get_new_requests(q, plug, bio);
+		if (!bio)
+			return;
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
 		if (unlikely(!rq))
 			return;
 	}
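Note: blk_mq_get_cached_request() now takes struct bio ** so it can signal "bio consumed by a merge" (pointer NULLed) separately from "no cached request, allocate a fresh one". A stub model of that calling convention (types and helpers below are invented for the sketch):

#include <stdio.h>
#include <stddef.h>

struct bio { int merged; };
struct request { int id; };

static struct request cached = { .id = 1 }, fresh = { .id = 2 };

static struct request *get_cached_request(struct bio **bio)
{
	if ((*bio)->merged) {	/* merge succeeded: bio is gone */
		*bio = NULL;
		return NULL;
	}
	return &cached;
}

static void submit_bio(struct bio *bio)
{
	struct request *rq = get_cached_request(&bio);

	if (!rq) {
		if (!bio)	/* consumed by merge: nothing left to do */
			return;
		rq = &fresh;	/* otherwise fall back to a new request */
	}
	printf("dispatch rq %d\n", rq->id);
}

int main(void)
{
	struct bio a = { .merged = 0 }, b = { .merged = 1 };

	submit_bio(&a);		/* dispatch rq 1 */
	submit_bio(&b);		/* silently merged */
	return 0;
}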
drivers/block/virtio_blk.c

@ -76,9 +76,6 @@ struct virtio_blk {
	 */
 	refcount_t refs;

-	/* What host tells us, plus 2 for header & tailer. */
-	unsigned int sg_elems;
-
 	/* Ida index - used to track minor number allocations. */
 	int index;

@ -322,8 +319,6 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t status;
 	int err;

-	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
 	if (unlikely(status))
 		return status;
@ -783,8 +778,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* Prevent integer overflows and honor max vq size */
 	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

-	/* We need extra sg elements at head and tail. */
-	sg_elems += 2;
 	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
@ -796,7 +789,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	mutex_init(&vblk->vdev_mutex);

 	vblk->vdev = vdev;
-	vblk->sg_elems = sg_elems;

 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

@ -853,7 +845,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		set_disk_ro(vblk->disk, 1);

 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_segments(q, vblk->sg_elems-2);
+	blk_queue_max_segments(q, sg_elems);

 	/* No real sector limit. */
 	blk_queue_max_hw_sectors(q, -1U);
@ -925,9 +917,15 @@ static int virtblk_probe(struct virtio_device *vdev)

 		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
+
+		/*
+		 * max_discard_seg == 0 is out of spec but we always
+		 * handled it.
+		 */
+		if (!v)
+			v = sg_elems;
 		blk_queue_max_discard_segments(q,
-					       min_not_zero(v,
-							    MAX_DISCARD_SEGMENTS));
+					       min(v, MAX_DISCARD_SEGMENTS));

 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	}
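Note: the discard clamp above tolerates an out-of-spec max_discard_seg of 0 by substituting sg_elems before applying min(). The same arithmetic as a runnable snippet (constants assumed for illustration):

#include <stdio.h>

#define MAX_DISCARD_SEGMENTS 256u

static unsigned int discard_segments(unsigned int v, unsigned int sg_elems)
{
	if (!v)			/* out-of-spec 0: fall back to sg_elems */
		v = sg_elems;
	return v < MAX_DISCARD_SEGMENTS ? v : MAX_DISCARD_SEGMENTS;  /* min() */
}

int main(void)
{
	printf("%u\n", discard_segments(0, 128));	/* -> 128 */
	printf("%u\n", discard_segments(1024, 128));	/* -> 256 */
	return 0;
}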
drivers/block/xen-blkfront.c

@ -1288,7 +1288,8 @@ free_shadow:
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+	free_pages_exact(rinfo->ring.sring,
+			 info->nr_ring_pages * XEN_PAGE_SIZE);
 	rinfo->ring.sring = NULL;

 	if (rinfo->irq)
@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
 	return BLKIF_RSP_OKAY;
 }

-static bool blkif_completion(unsigned long *id,
-			     struct blkfront_ring_info *rinfo,
-			     struct blkif_response *bret)
+/*
+ * Return values:
+ *  1 response processed.
+ *  0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+			    struct blkfront_ring_info *rinfo,
+			    struct blkif_response *bret)
 {
 	int i = 0;
 	struct scatterlist *sg;
@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id,

 		/* Wait the second response if not yet here. */
 		if (s2->status < REQ_DONE)
-			return false;
+			return 0;

 		bret->status = blkif_get_final_status(s->status,
						      s2->status);
@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id,
 	}
 	/* Add the persistent grant into the list of free grants */
 	for (i = 0; i < num_grant; i++) {
-		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+		if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
-			if (!info->feature_persistent)
-				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-						     s->grants_used[i]->gref);
+			if (!info->feature_persistent) {
+				pr_alert("backed has not unmapped grant: %u\n",
+					 s->grants_used[i]->gref);
+				return -1;
+			}
			list_add(&s->grants_used[i]->node, &rinfo->grants);
			rinfo->persistent_gnts_c++;
		} else {
			/*
-			 * If the grant is not mapped by the backend we end the
-			 * foreign access and add it to the tail of the list,
-			 * so it will not be picked again unless we run out of
-			 * persistent grants.
+			 * If the grant is not mapped by the backend we add it
+			 * to the tail of the list, so it will not be picked
+			 * again unless we run out of persistent grants.
			 */
-			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
-			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-				if (!info->feature_persistent)
-					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-							     s->indirect_grants[i]->gref);
+			if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+				if (!info->feature_persistent) {
+					pr_alert("backed has not unmapped grant: %u\n",
+						 s->indirect_grants[i]->gref);
+					return -1;
+				}
				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
				rinfo->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

-				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
		}
	}

-	return true;
+	return 1;
 }

 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
		}

		if (bret.operation != BLKIF_OP_DISCARD) {
+			int ret;
+
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in 2
			 */
-			if (!blkif_completion(&id, rinfo, &bret))
+			ret = blkif_completion(&id, rinfo, &bret);
+			if (!ret)
				continue;
+			if (unlikely(ret < 0))
+				goto err;
		}

		if (add_id_to_freelist(rinfo, id)) {
@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev,
 	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

-	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
-						       get_order(ring_size));
+	sring = alloc_pages_exact(ring_size, GFP_NOIO);
 	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev,

 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
 	if (err < 0) {
-		free_pages((unsigned long)sring, get_order(ring_size));
+		free_pages_exact(sring, ring_size);
		rinfo->ring.sring = NULL;
		goto fail;
	}
@ -2532,11 +2544,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
					 node) {
			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
-			    gnttab_query_foreign_access(gnt_list_entry->gref))
+			    !gnttab_try_end_foreign_access(gnt_list_entry->gref))
				continue;

			list_del(&gnt_list_entry->node);
-			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
			rinfo->persistent_gnts_c--;
			gnt_list_entry->gref = GRANT_INVALID_REF;
			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
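Note: blkif_completion() switches from bool to a tri-state int so the interrupt handler can distinguish "wait for the second half of a split request" from a fatal backend error. A stub model of the caller's handling (outcomes below are scripted, not real ring responses):

#include <stdio.h>

static int completion_stub(int step)
{
	static const int results[] = { 0, 1, -1 };	/* scripted outcomes */
	return results[step % 3];
}

int main(void)
{
	int step;

	for (step = 0; step < 3; step++) {
		int ret = completion_stub(step);

		if (!ret) {
			puts("missing further responses: continue");
			continue;
		}
		if (ret < 0) {
			puts("error while processing: bail out");
			break;
		}
		puts("response processed");
	}
	return 0;
}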
drivers/char/virtio_console.c

@ -1957,6 +1957,13 @@ static void virtcons_remove(struct virtio_device *vdev)
 	list_del(&portdev->list);
 	spin_unlock_irq(&pdrvdata_lock);

+	/* Device is going away, exit any polling for buffers */
+	virtio_break_device(vdev);
+	if (use_multiport(portdev))
+		flush_work(&portdev->control_work);
+	else
+		flush_work(&portdev->config_work);
+
 	/* Disable interrupts for vqs */
 	virtio_reset_device(vdev);
 	/* Finish up work that's lined up */
drivers/clk/Kconfig

@ -231,6 +231,8 @@ config COMMON_CLK_GEMINI

 config COMMON_CLK_LAN966X
	bool "Generic Clock Controller driver for LAN966X SoC"
+	depends on HAS_IOMEM
+	depends on OF
	help
	  This driver provides support for Generic Clock Controller(GCK) on
	  LAN966X SoC. GCK generates and supplies clock to various peripherals
drivers/clk/qcom/dispcc-sc7180.c

@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved.
  */

 #include <linux/clk-provider.h>
@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {

 static struct gdsc mdss_gdsc = {
	.gdscr = 0x3000,
+	.en_rest_wait_val = 0x2,
+	.en_few_wait_val = 0x2,
+	.clk_dis_wait_val = 0xf,
	.pd = {
		.name = "mdss_gdsc",
	},
drivers/clk/qcom/dispcc-sc7280.c

@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved.
  */

 #include <linux/clk-provider.h>
@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = {

 static struct gdsc disp_cc_mdss_core_gdsc = {
	.gdscr = 0x1004,
+	.en_rest_wait_val = 0x2,
+	.en_few_wait_val = 0x2,
+	.clk_dis_wait_val = 0xf,
	.pd = {
		.name = "disp_cc_mdss_core_gdsc",
	},
drivers/clk/qcom/dispcc-sm8250.c

@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved.
  */

 #include <linux/clk-provider.h>
@ -1126,6 +1126,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {

 static struct gdsc mdss_gdsc = {
	.gdscr = 0x3000,
+	.en_rest_wait_val = 0x2,
+	.en_few_wait_val = 0x2,
+	.clk_dis_wait_val = 0xf,
	.pd = {
		.name = "mdss_gdsc",
	},
drivers/clk/qcom/gdsc.c

@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
  */

 #include <linux/bitops.h>
@ -35,9 +35,14 @@
 #define CFG_GDSCR_OFFSET		0x4

 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL	(0x2 << 20)
-#define EN_FEW_WAIT_VAL		(0x8 << 16)
-#define CLK_DIS_WAIT_VAL	(0x2 << 12)
+#define EN_REST_WAIT_VAL	0x2
+#define EN_FEW_WAIT_VAL		0x8
+#define CLK_DIS_WAIT_VAL	0x2
+
+/* Transition delay shifts */
+#define EN_REST_WAIT_SHIFT	20
+#define EN_FEW_WAIT_SHIFT	16
+#define CLK_DIS_WAIT_SHIFT	12

 #define RETAIN_MEM		BIT(14)
 #define RETAIN_PERIPH		BIT(13)
@ -380,7 +385,18 @@ static int gdsc_init(struct gdsc *sc)
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
-	val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+
+	if (!sc->en_rest_wait_val)
+		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
+	if (!sc->en_few_wait_val)
+		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
+	if (!sc->clk_dis_wait_val)
+		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+
+	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
+	      sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
+	      sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;
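Note: gdsc_init() now defaults any unset per-GDSC delay to the old global value and shifts each field into place before the regmap update. The register math as a runnable sketch (mask handling omitted; widths illustrative):

#include <stdio.h>

#define EN_REST_WAIT_SHIFT 20
#define EN_FEW_WAIT_SHIFT  16
#define CLK_DIS_WAIT_SHIFT 12

/* legacy defaults from the old #defines */
#define EN_REST_WAIT_VAL 0x2
#define EN_FEW_WAIT_VAL  0x8
#define CLK_DIS_WAIT_VAL 0x2

struct gdsc_delays { unsigned int en_rest, en_few, clk_dis; };

static unsigned int gdscr_val(struct gdsc_delays *sc)
{
	if (!sc->en_rest)
		sc->en_rest = EN_REST_WAIT_VAL;
	if (!sc->en_few)
		sc->en_few = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis)
		sc->clk_dis = CLK_DIS_WAIT_VAL;

	return sc->en_rest << EN_REST_WAIT_SHIFT |
	       sc->en_few  << EN_FEW_WAIT_SHIFT |
	       sc->clk_dis << CLK_DIS_WAIT_SHIFT;
}

int main(void)
{
	struct gdsc_delays mdss = { .en_rest = 0x2, .en_few = 0x2, .clk_dis = 0xf };
	struct gdsc_delays def  = { 0 };

	printf("mdss:    0x%08x\n", gdscr_val(&mdss));	/* per-GDSC values */
	printf("default: 0x%08x\n", gdscr_val(&def));	/* legacy defaults */
	return 0;
}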
drivers/clk/qcom/gdsc.h

@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
  */

 #ifndef __QCOM_GDSC_H__
@ -22,6 +22,9 @@ struct reset_controller_dev;
  * @cxcs: offsets of branch registers to toggle mem/periph bits in
  * @cxc_count: number of @cxcs
  * @pwrsts: Possible powerdomain power states
+ * @en_rest_wait_val: transition delay value for receiving enr ack signal
+ * @en_few_wait_val: transition delay value for receiving enf ack signal
+ * @clk_dis_wait_val: transition delay value for halting clock
  * @resets: ids of resets associated with this gdsc
  * @reset_count: number of @resets
  * @rcdev: reset controller
@ -36,6 +39,9 @@ struct gdsc {
	unsigned int clamp_io_ctrl;
	unsigned int *cxcs;
	unsigned int cxc_count;
+	unsigned int en_rest_wait_val;
+	unsigned int en_few_wait_val;
+	unsigned int clk_dis_wait_val;
	const u8 pwrsts;
 /* Powerdomain allowable state bitfields */
 #define PWRSTS_OFF		BIT(0)
drivers/gpio/gpio-sim.c

@ -547,7 +547,7 @@ struct gpio_sim_bank {
	 *
	 * So we need to store the pointer to the parent struct here. We can
	 * dereference it anywhere we need with no checks and no locking as
-	 * it's guaranteed to survive the childred and protected by configfs
+	 * it's guaranteed to survive the children and protected by configfs
	 * locks.
	 *
	 * Same for other structures.
@ -1322,7 +1322,7 @@ static void gpio_sim_hog_config_item_release(struct config_item *item)
 	kfree(hog);
 }

-struct configfs_item_operations gpio_sim_hog_config_item_ops = {
+static struct configfs_item_operations gpio_sim_hog_config_item_ops = {
	.release = gpio_sim_hog_config_item_release,
 };
Some files were not shown because too many files have changed in this diff.