mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 04:34:08 +08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

include/trace/events/rpcgss.h
  386f4a7379 ("trace: events: cleanup deprecated strncpy uses")
  a4833e3aba ("SUNRPC: Fix rpcgss_context trace event acceptor field")

Adjacent changes:

drivers/net/ethernet/intel/ice/ice_tc_lib.c
  2cca35f5dd ("ice: Fix checking for unsupported keys on non-tunnel device")
  784feaa65d ("ice: Add support for PFCP hardware offload in switchdev")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

This commit is contained in:
commit 41e3ddb291
CREDITS (4 changed lines)

@@ -3146,6 +3146,10 @@ S: Triftstraße 55
S: 13353 Berlin
S: Germany

N: Gustavo Pimentel
E: gustavo.pimentel@synopsys.com
D: PCI driver for Synopsys DesignWare

N: Emanuel Pirker
E: epirker@edu.uni-klu.ac.at
D: AIC5800 IEEE 1394, RAW I/O on 1394
@@ -439,12 +439,12 @@ The possible values in this file are:
- System is protected by retpoline
* - BHI: BHI_DIS_S
- System is protected by BHI_DIS_S
* - BHI: SW loop; KVM SW loop
* - BHI: SW loop, KVM SW loop
- System is protected by software clearing sequence
* - BHI: Syscall hardening
- Syscalls are hardened against BHI
* - BHI: Syscall hardening; KVM: SW loop
- System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
* - BHI: Vulnerable
- System is vulnerable to BHI
* - BHI: Vulnerable, KVM: SW loop
- System is vulnerable; KVM is protected by software clearing sequence

Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will

@@ -661,18 +661,14 @@ kernel command line.
spectre_bhi=

[X86] Control mitigation of Branch History Injection
(BHI) vulnerability. Syscalls are hardened against BHI
regardless of this setting. This setting affects the deployment
(BHI) vulnerability. This setting affects the deployment
of the HW BHI control and the SW BHB clearing sequence.

on
unconditionally enable.
(default) Enable the HW or SW mitigation as
needed.
off
unconditionally disable.
auto
enable if hardware mitigation
control(BHI_DIS_S) is available, otherwise
enable alternate mitigation in KVM.
Disable the mitigation.

For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt

@@ -3444,6 +3444,7 @@
retbleed=off [X86]
spec_rstack_overflow=off [X86]
spec_store_bypass_disable=off [X86,PPC]
spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]

@@ -6064,16 +6065,13 @@
See Documentation/admin-guide/laptops/sonypi.rst

spectre_bhi= [X86] Control mitigation of Branch History Injection
(BHI) vulnerability. Syscalls are hardened against BHI
reglardless of this setting. This setting affects the
(BHI) vulnerability. This setting affects the
deployment of the HW BHI control and the SW BHB
clearing sequence.

on - unconditionally enable.
off - unconditionally disable.
auto - (default) enable hardware mitigation
(BHI_DIS_S) if available, otherwise enable
alternate mitigation in KVM.
on - (default) Enable the HW or SW mitigation
as needed.
off - Disable the mitigation.

spectre_v2= [X86,EARLY] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
@@ -53,6 +53,15 @@ patternProperties:
compatible:
const: qcom,sm8150-dpu

"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true

properties:
compatible:
contains:
const: qcom,sm8150-dp

"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true

@@ -52,6 +52,9 @@ properties:
- const: main
- const: mm

power-domains:
maxItems: 1

required:
- compatible
- reg

@@ -97,7 +97,6 @@ like this::

static struct virtio_driver virtio_dummy_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_dummy_probe,
.remove = virtio_dummy_remove,
MAINTAINERS (41 changed lines)

@@ -2707,7 +2707,7 @@ F: sound/soc/rockchip/
N: rockchip

ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org

@@ -4869,7 +4869,6 @@ F: drivers/power/supply/cw2015_battery.c
CEPH COMMON CODE (LIBCEPH)
M: Ilya Dryomov <idryomov@gmail.com>
M: Xiubo Li <xiubli@redhat.com>
R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/

@@ -4881,7 +4880,6 @@ F: net/ceph/
CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
M: Xiubo Li <xiubli@redhat.com>
M: Ilya Dryomov <idryomov@gmail.com>
R: Jeff Layton <jlayton@kernel.org>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/

@@ -5557,7 +5555,7 @@ F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVER - ARM EXYNOS
M: Daniel Lezcano <daniel.lezcano@linaro.org>
M: Kukjin Kim <kgene@kernel.org>
R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained

@@ -8998,7 +8996,7 @@ F: drivers/i2c/muxes/i2c-mux-gpio.c
F: include/linux/platform_data/i2c-mux-gpio.h

GENERIC GPIO RESET DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: drivers/reset/reset-gpio.c

@@ -13293,7 +13291,7 @@ F: drivers/iio/adc/max11205.c

MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
R: Iskren Chernev <iskren.chernev@gmail.com>
R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Matheus Castello <matheus@castello.eng.br>
L: linux-pm@vger.kernel.org

@@ -13303,7 +13301,7 @@ F: drivers/power/supply/max17040_battery.c

MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
R: Hans de Goede <hdegoede@redhat.com>
R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Krzysztof Kozlowski <krzk@kernel.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
R: Purism Kernel Team <kernel@puri.sm>

@@ -13361,7 +13359,7 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
F: drivers/power/supply/max77976_charger.c

MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org

@@ -13372,7 +13370,7 @@ F: drivers/power/supply/max77693_charger.c

MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:linux-samsung-soc@vger.kernel.org

@@ -14156,7 +14154,7 @@ F: mm/mm_init.c
F: tools/testing/memblock/

MEMORY CONTROLLER DRIVERS
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
B: mailto:krzysztof.kozlowski@linaro.org

@@ -15538,7 +15536,7 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c

NFC SUBSYSTEM
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/

@@ -15915,7 +15913,7 @@ F: Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
F: drivers/regulator/pf8x00-regulator.c

NXP PTN5150A CC LOGIC AND EXTCON DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml

@@ -16526,7 +16524,7 @@ K: of_overlay_remove

OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh@kernel.org>
M: Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
M: Krzysztof Kozlowski <krzk+dt@kernel.org>
M: Conor Dooley <conor+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained

@@ -16973,7 +16971,6 @@ F: drivers/pci/controller/dwc/pci-exynos.c

PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained

@@ -17484,7 +17481,7 @@ F: Documentation/devicetree/bindings/pinctrl/renesas,*
F: drivers/pinctrl/renesas/

PIN CONTROLLER - SAMSUNG
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

@@ -19459,7 +19456,7 @@ F: Documentation/devicetree/bindings/sound/samsung*
F: sound/soc/samsung/

SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained

@@ -19494,7 +19491,7 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c

SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained

@@ -19520,7 +19517,7 @@ F: drivers/media/platform/samsung/s3c-camif/
F: include/media/drv-intf/s3c_camif.h

SAMSUNG S3FWRN5 NFC DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5

@@ -19541,7 +19538,7 @@ S: Supported
F: drivers/media/i2c/s5k5baf.c

SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org

@@ -19563,7 +19560,7 @@ F: Documentation/devicetree/bindings/media/samsung,fimc.yaml
F: drivers/media/platform/samsung/exynos4-is/

SAMSUNG SOC CLOCK DRIVERS
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>

@@ -19595,7 +19592,7 @@ F: drivers/net/ethernet/samsung/sxgbe/

SAMSUNG THERMAL DRIVER
M: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained

@@ -23793,7 +23790,7 @@ S: Orphan
F: drivers/mmc/host/vub300.c

W1 DALLAS'S 1-WIRE BUS
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
S: Maintained
F: Documentation/devicetree/bindings/w1/
F: Documentation/w1/
Makefile (2 changed lines)

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
arch/Kconfig (12 changed lines)

@@ -1172,12 +1172,12 @@ config PAGE_SIZE_LESS_THAN_256KB

config PAGE_SHIFT
int
default 12 if PAGE_SIZE_4KB
default 13 if PAGE_SIZE_8KB
default 14 if PAGE_SIZE_16KB
default 15 if PAGE_SIZE_32KB
default 16 if PAGE_SIZE_64KB
default 18 if PAGE_SIZE_256KB
default 12 if PAGE_SIZE_4KB
default 13 if PAGE_SIZE_8KB
default 14 if PAGE_SIZE_16KB
default 15 if PAGE_SIZE_32KB
default 16 if PAGE_SIZE_64KB
default 18 if PAGE_SIZE_256KB

# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
@@ -666,7 +666,7 @@
bus-width = <4>;
no-1-8-v;
no-sdio;
no-emmc;
no-mmc;
status = "okay";
};

@@ -210,6 +210,7 @@
remote-endpoint = <&mipi_from_sensor>;
clock-lanes = <0>;
data-lanes = <1>;
link-frequencies = /bits/ 64 <330000000>;
};
};
};
@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
static struct gpiod_lookup_table tusb_gpio_table = {
.dev_id = "musb-tusb",
.table = {
GPIO_LOOKUP("gpio-0-15", 0, "enable",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-48-63", 10, "int",
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
{ }
},
};

@@ -140,12 +138,11 @@ static int slot1_cover_open;
static int slot2_cover_open;
static struct device *mmc_device;

static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
GPIO_LOOKUP("gpio-80-111", 16,
"switch", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
{ }
},
};

@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
.dev_id = "mmci-omap.0",
.table = {
/* Slot switch, GPIO 96 */
GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
/* Slot index 1, VSD power, GPIO 23 */
GPIO_LOOKUP_IDX("gpio-16-31", 7,
"vsd", 1, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
/* Slot index 1, VIO power, GPIO 9 */
GPIO_LOOKUP_IDX("gpio-0-15", 9,
"vio", 1, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
{ }
},
};

@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];

static void __init n8x0_mmc_init(void)
{
gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);

if (board_is_n810()) {
mmc1_data.slots[0].name = "external";

@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
mmc1_data.slots[1].name = "internal";
mmc1_data.slots[1].ban_openended = 1;
gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
} else {
gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
}

mmc1_data.nr_slots = 2;
@@ -41,7 +41,7 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
fsl,usbphy = <&usbphy1>;
fsl,usbmisc = <&usbmisc1 0>;
clocks = <&usb2_lpcg 0>;
clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>;
rx-burst-size-dword = <0x10>;

@@ -58,7 +58,7 @@ conn_subsys: bus@5b000000 {
usbphy1: usbphy@5b100000 {
compatible = "fsl,imx7ulp-usbphy";
reg = <0x5b100000 0x1000>;
clocks = <&usb2_lpcg 1>;
clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
power-domains = <&pd IMX_SC_R_USB_0_PHY>;
status = "disabled";
};

@@ -67,8 +67,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b010000 0x10000>;
clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
<&sdhc0_lpcg IMX_LPCG_CLK_0>,
<&sdhc0_lpcg IMX_LPCG_CLK_5>;
<&sdhc0_lpcg IMX_LPCG_CLK_5>,
<&sdhc0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_0>;
status = "disabled";

@@ -78,8 +78,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b020000 0x10000>;
clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
<&sdhc1_lpcg IMX_LPCG_CLK_0>,
<&sdhc1_lpcg IMX_LPCG_CLK_5>;
<&sdhc1_lpcg IMX_LPCG_CLK_5>,
<&sdhc1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_1>;
fsl,tuning-start-tap = <20>;

@@ -91,8 +91,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b030000 0x10000>;
clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
<&sdhc2_lpcg IMX_LPCG_CLK_0>,
<&sdhc2_lpcg IMX_LPCG_CLK_5>;
<&sdhc2_lpcg IMX_LPCG_CLK_5>,
<&sdhc2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_2>;
status = "disabled";
@@ -28,8 +28,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi0_lpcg 0>,
<&spi0_lpcg 1>;
clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
<&spi0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;

@@ -44,8 +44,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi1_lpcg 0>,
<&spi1_lpcg 1>;
clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
<&spi1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;

@@ -60,8 +60,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi2_lpcg 0>,
<&spi2_lpcg 1>;
clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
<&spi2_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;

@@ -76,8 +76,8 @@ dma_subsys: bus@5a000000 {
#size-cells = <0>;
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&spi3_lpcg 0>,
<&spi3_lpcg 1>;
clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
<&spi3_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <60000000>;

@@ -145,8 +145,8 @@ dma_subsys: bus@5a000000 {
compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
reg = <0x5a190000 0x1000>;
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&adma_pwm_lpcg 1>,
<&adma_pwm_lpcg 0>;
clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
<&adma_pwm_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;

@@ -355,8 +355,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a880000 0x10000>;
interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&adc0_lpcg 0>,
<&adc0_lpcg 1>;
clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
<&adc0_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;

@@ -370,8 +370,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a890000 0x10000>;
interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&adc1_lpcg 0>,
<&adc1_lpcg 1>;
clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
<&adc1_lpcg IMX_LPCG_CLK_4>;
clock-names = "per", "ipg";
assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;

@@ -384,8 +384,8 @@ dma_subsys: bus@5a000000 {
reg = <0x5a8d0000 0x10000>;
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;

@@ -405,8 +405,8 @@ dma_subsys: bus@5a000000 {
* CAN1 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;

@@ -426,8 +426,8 @@ dma_subsys: bus@5a000000 {
* CAN2 shares CAN0's clock and to enable CAN0's clock it
* has to be powered on.
*/
clocks = <&can0_lpcg 1>,
<&can0_lpcg 0>;
clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
<&can0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <40000000>;
@@ -25,8 +25,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d000000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm0_lpcg 4>,
<&pwm0_lpcg 1>;
clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
<&pwm0_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;

@@ -38,8 +38,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d010000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm1_lpcg 4>,
<&pwm1_lpcg 1>;
clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
<&pwm1_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;

@@ -51,8 +51,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d020000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm2_lpcg 4>,
<&pwm2_lpcg 1>;
clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
<&pwm2_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;

@@ -64,8 +64,8 @@ lsio_subsys: bus@5d000000 {
compatible = "fsl,imx27-pwm";
reg = <0x5d030000 0x10000>;
clock-names = "ipg", "per";
clocks = <&pwm3_lpcg 4>,
<&pwm3_lpcg 1>;
clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
<&pwm3_lpcg IMX_LPCG_CLK_1>;
assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
#pwm-cells = <3>;
@@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;

port {

@@ -183,7 +184,6 @@
};

&usb3_phy0 {
vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};

@@ -14,6 +14,7 @@
pinctrl-0 = <&pinctrl_usbcon1>;
type = "micro";
label = "otg";
vbus-supply = <&reg_usb1_vbus>;
id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;

port {

@@ -202,7 +203,6 @@
};

&usb3_phy0 {
vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};

@@ -153,15 +153,15 @@
};

&flexcan2 {
clocks = <&can1_lpcg 1>,
<&can1_lpcg 0>;
clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
<&can1_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};

&flexcan3 {
clocks = <&can2_lpcg 1>,
<&can2_lpcg 0>;
clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
<&can2_lpcg IMX_LPCG_CLK_0>;
assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
fsl,clk-source = /bits/ 8 <1>;
};
@@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)

/*
* Generate 'num' values from -1 to 30 with -1 rejected by the
* __flush_tlb_range() loop below.
* Generate 'num' values from -1 to 31 with -1 rejected by the
* __flush_tlb_range() loop below. Its return value is only
* significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
* 'pages' is more than that, you must iterate over the overall
* range.
*/
#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale) \
((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
#define __TLBI_RANGE_NUM(pages, scale) \
({ \
int __pages = min((pages), \
__TLBI_RANGE_PAGES(31, (scale))); \
(__pages >> (5 * (scale) + 1)) - 1; \
})

/*
* TLB Invalidation

@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* 3. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
*
* Note that certain ranges can be represented by either num = 31 and
* scale or num = 0 and scale + 1. The loop below favours the latter
* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
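The arithmetic of the new __TLBI_RANGE_NUM() is easier to verify stand-alone. Below is a minimal user-space C sketch of the clamping behaviour; the names mirror the kernel macros, but this is an illustration, not kernel code:

#include <stdio.h>

/* Stand-alone mirror of the kernel macro, for illustration only. */
#define TLBI_RANGE_PAGES(num, scale) \
	((unsigned long)((num) + 1) << (5 * (scale) + 1))

/* New form: clamp 'pages' first, so any scale can now yield num = 31. */
static int tlbi_range_num(unsigned long pages, int scale)
{
	unsigned long max = TLBI_RANGE_PAGES(31, scale);
	unsigned long p = pages < max ? pages : max;	/* min() */

	return (int)(p >> (5 * scale + 1)) - 1;
}

int main(void)
{
	/* MAX_TLBI_RANGE_PAGES pages at scale 3: the old macro masked the
	 * shifted value with GENMASK(4, 0) and returned -1 here; the
	 * clamped form returns 31, so the range is fully representable. */
	printf("%d\n", tlbi_range_num(1UL << 21, 3));	/* prints 31 */
	return 0;
}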
@@ -159,7 +159,7 @@ extern unsigned long exception_ip(struct pt_regs *regs);
#define exception_ip(regs) exception_ip(regs)
#define profile_pc(regs) instruction_pointer(regs)

extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);

extern void die(const char *, struct pt_regs *) __noreturn;

@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_REGS, thread_info, regs);
OFFSET(TI_SYSCALL, thread_info, syscall);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);

@@ -1317,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request,
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
user_exit();

current_thread_info()->syscall = syscall;

if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (ptrace_report_syscall_entry(regs))
return -1;
syscall = current_thread_info()->syscall;
}

#ifdef CONFIG_SECCOMP

@@ -1335,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
struct seccomp_data sd;
unsigned long args[6];

sd.nr = syscall;
sd.nr = current_thread_info()->syscall;
sd.arch = syscall_get_arch(current);
syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)

@@ -1345,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
ret = __secure_computing(&sd);
if (ret == -1)
return ret;
syscall = current_thread_info()->syscall;
}
#endif

if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);

audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
audit_syscall_entry(current_thread_info()->syscall,
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);

/*
* Negative syscall numbers are mistaken for rejected syscalls, but
* won't have had the return value set appropriately, so we do so now.
*/
if (syscall < 0)
if (current_thread_info()->syscall < 0)
syscall_set_return_value(current, regs, -ENOSYS, 0);
return syscall;
return current_thread_info()->syscall;
}

/*
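The repeated current_thread_info()->syscall loads above are the substance of the change: ptrace and seccomp are allowed to rewrite the stored syscall number, so every later consumer must re-read the canonical per-thread slot instead of a stale local copy. A compressed, hypothetical C sketch of that flow (not the MIPS code):

/* Hypothetical sketch: the syscall number lives in one canonical
 * per-thread slot that tracers may rewrite at entry. */
struct tinfo { long syscall; };

static long trace_enter(struct tinfo *ti, long (*tracer)(struct tinfo *))
{
	if (tracer && tracer(ti) < 0)	/* tracer may modify ti->syscall */
		return -1;		/* reject: skip the syscall */

	return ti->syscall;		/* re-read, never a stale copy */
}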
@@ -77,6 +77,18 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous

/*
* syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
*/
subu t2, v0, __NR_O32_Linux
bnez t2, 1f /* __NR_syscall at offset 0 */
LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
b 2f
1:
LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
2:

lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1

@@ -114,16 +126,7 @@ syscall_trace_entry:
SAVE_STATIC
move a0, sp

/*
* syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
*/
move a1, v0
subu t2, v0, __NR_O32_Linux
bnez t2, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp)

1: jal syscall_trace_enter
jal syscall_trace_enter

bltz v0, 1f # seccomp failed? Skip syscall

@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)

sd a3, PT_R26(sp) # save a3 for syscall restarting

LONG_S v0, TI_SYSCALL($28) # Store syscall number

li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0

@@ -72,7 +74,6 @@ syscall_common:
n32_syscall_trace_entry:
SAVE_STATIC
move a0, sp
move a1, v0
jal syscall_trace_enter

bltz v0, 1f # seccomp failed? Skip syscall

@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)

sd a3, PT_R26(sp) # save a3 for syscall restarting

LONG_S v0, TI_SYSCALL($28) # Store syscall number

li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0

@@ -82,7 +84,6 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
move a0, sp
move a1, v0
jal syscall_trace_enter

bltz v0, 1f # seccomp failed? Skip syscall

@@ -79,6 +79,22 @@ loads_done:
PTR_WD load_a7, bad_stack_a7
.previous

/*
* absolute syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
* note: NR_syscall is the first O32 syscall but the macro is
* only defined when compiling with -mabi=32 (CONFIG_32BIT)
* therefore __NR_O32_Linux is used (4000)
*/

subu t2, v0, __NR_O32_Linux
bnez t2, 1f /* __NR_syscall at offset 0 */
LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
b 2f
1:
LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
2:

li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0

@@ -113,22 +129,7 @@ trace_a_syscall:
sd a7, PT_R11(sp) # For indirect syscalls

move a0, sp
/*
* absolute syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
* note: NR_syscall is the first O32 syscall but the macro is
* only defined when compiling with -mabi=32 (CONFIG_32BIT)
* therefore __NR_O32_Linux is used (4000)
*/
.set push
.set reorder
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop

1: jal syscall_trace_enter
jal syscall_trace_enter

bltz v0, 1f # seccomp failed? Skip syscall
@@ -2633,32 +2633,16 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>

choice
prompt "Clear branch history"
config MITIGATION_SPECTRE_BHI
bool "Mitigate Spectre-BHB (Branch History Injection)"
depends on CPU_SUP_INTEL
default SPECTRE_BHI_ON
default y
help
Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
where the branch history buffer is poisoned to speculatively steer
indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst>

config SPECTRE_BHI_ON
bool "on"
help
Equivalent to setting spectre_bhi=on command line parameter.
config SPECTRE_BHI_OFF
bool "off"
help
Equivalent to setting spectre_bhi=off command line parameter.
config SPECTRE_BHI_AUTO
bool "auto"
depends on BROKEN
help
Equivalent to setting spectre_bhi=auto command line parameter.

endchoice

endif

config ARCH_HAS_ADD_PAGES

@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
cpuc->assign[i-1] = cpuc->assign[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
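The one-line NULL store added to x86_pmu_del() follows the usual compact-and-clear pattern: after shifting array entries left over a removed slot, the vacated last slot is cleared so a stale pointer cannot be reused. A generic sketch of that pattern (hypothetical helper, not the perf code):

#include <stddef.h>

/* Remove arr[idx] from a pointer array of length n by shifting the
 * tail left, then clear the vacated last slot. Returns the new length. */
static size_t remove_at(void **arr, size_t n, size_t idx)
{
	for (size_t i = idx + 1; i < n; i++)
		arr[i - 1] = arr[i];
	arr[n - 1] = NULL;	/* mirrors the event_constraint[i-1] = NULL fix */
	return n - 1;
}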
@@ -105,7 +105,7 @@ static bool cpu_is_self(int cpu)
* IPI implementation on Hyper-V.
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
bool exclude_self)
bool exclude_self)
{
struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;

@@ -132,8 +132,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;

nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
exclude_self ? cpu_is_self : NULL);
nr_bank = cpumask_to_vpset_skip(&ipi_arg->vp_set, mask,
exclude_self ? cpu_is_self : NULL);

/*
* 'nr_bank <= 0' means some CPUs in cpumask can't be

@@ -147,7 +147,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
}

status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
ipi_arg, NULL);
ipi_arg, NULL);

ipi_mask_ex_done:
local_irq_restore(flags);

@@ -155,7 +155,7 @@ ipi_mask_ex_done:
}

static bool __send_ipi_mask(const struct cpumask *mask, int vector,
bool exclude_self)
bool exclude_self)
{
int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;

@@ -181,7 +181,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
return false;
}

if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;

/*

@@ -218,7 +218,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
}

status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
ipi_arg.cpu_mask);
ipi_arg.cpu_mask);
return hv_result_success(status);

do_ex_hypercall:

@@ -241,7 +241,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;
}

if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
return false;

if (vp >= 64)

@@ -3,7 +3,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>

@@ -116,12 +115,11 @@ free_buf:

int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
struct hv_add_logical_processor_in *input;
struct hv_add_logical_processor_out *output;
struct hv_input_add_logical_processor *input;
struct hv_output_add_logical_processor *output;
u64 status;
unsigned long flags;
int ret = HV_STATUS_SUCCESS;
int pxm = node_to_pxm(node);

/*
* When adding a logical processor, the hypervisor may return

@@ -137,11 +135,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)

input->lp_index = lp_index;
input->apic_id = apic_id;
input->flags = 0;
input->proximity_domain_info.domain_id = pxm;
input->proximity_domain_info.flags.reserved = 0;
input->proximity_domain_info.flags.proximity_info_valid = 1;
input->proximity_domain_info.flags.proximity_preferred = 1;
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
input, output);
local_irq_restore(flags);

@@ -166,7 +160,6 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
u64 status;
unsigned long irq_flags;
int ret = HV_STATUS_SUCCESS;
int pxm = node_to_pxm(node);

/* Root VPs don't seem to need pages deposited */
if (partition_id != hv_current_partition_id) {

@@ -185,14 +178,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
input->vp_index = vp_index;
input->flags = flags;
input->subnode_type = HvSubnodeAny;
if (node != NUMA_NO_NODE) {
input->proximity_domain_info.domain_id = pxm;
input->proximity_domain_info.flags.reserved = 0;
input->proximity_domain_info.flags.proximity_info_valid = 1;
input->proximity_domain_info.flags.proximity_preferred = 1;
} else {
input->proximity_domain_info.as_uint64 = 0;
}
input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
local_irq_restore(irq_flags);
@@ -13,6 +13,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>

#define ARCH_APICTIMER_STOPS_ON_C3 1

@@ -98,7 +99,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)

static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
return readl((void __iomem *)(APIC_BASE + reg));
}

static inline void native_apic_mem_eoi(void)

@@ -1687,11 +1687,11 @@ static int x2apic_state;

static bool x2apic_hw_locked(void)
{
u64 ia32_cap;
u64 x86_arch_cap_msr;
u64 msr;

ia32_cap = x86_read_arch_cap_msr();
if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
x86_arch_cap_msr = x86_read_arch_cap_msr();
if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}
@@ -535,7 +535,6 @@ clear_sev:

static void early_init_amd(struct cpuinfo_x86 *c)
{
u64 value;
u32 dummy;

if (c->x86 >= 0xf)

@@ -603,20 +602,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)

early_detect_mem_encrypt(c);

/* Re-enable TopologyExtensions if switched off by BIOS */
if (c->x86 == 0x15 &&
(c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {

if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}

if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}

x86_arch_cap_msr = x86_read_arch_cap_msr();

/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();

@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {

static void __init taa_select_mitigation(void)
{
u64 ia32_cap;

if (!boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_OFF;
return;

@@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
* update is required.
*/
ia32_cap = x86_read_arch_cap_msr();
if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
!(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

/*

@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {

static void __init mmio_select_mitigation(void)
{
u64 ia32_cap;

if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {

@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;

ia32_cap = x86_read_arch_cap_msr();

/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.

@@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);

/*

@@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
!(ia32_cap & ARCH_CAP_MDS_NO)))
!(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;

if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;

@@ -659,8 +656,6 @@ void update_srbds_msr(void)

static void __init srbds_select_mitigation(void)
{
u64 ia32_cap;

if (!boot_cpu_has_bug(X86_BUG_SRBDS))
return;

@@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
ia32_cap = x86_read_arch_cap_msr();
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))

@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
/* Will verify below that mitigation _can_ be disabled */

/* No microcode */
if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
if (gds_mitigation == GDS_MITIGATION_FORCE) {
/*
* This only needs to be done on the boot CPU so do it
@@ -1544,20 +1538,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}

static bool __ro_after_init rrsba_disabled;

/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
u64 ia32_cap;
if (rrsba_disabled)
return;

if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
rrsba_disabled = true;
return;
}

if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
return;

ia32_cap = x86_read_arch_cap_msr();

if (ia32_cap & ARCH_CAP_RRSBA) {
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
}
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
rrsba_disabled = true;
}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)

@@ -1626,13 +1625,10 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
BHI_MITIGATION_ON,
BHI_MITIGATION_AUTO,
};

static enum bhi_mitigations bhi_mitigation __ro_after_init =
IS_ENABLED(CONFIG_SPECTRE_BHI_ON) ? BHI_MITIGATION_ON :
IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
BHI_MITIGATION_AUTO;
IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;

static int __init spectre_bhi_parse_cmdline(char *str)
{

@@ -1643,8 +1639,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
bhi_mitigation = BHI_MITIGATION_OFF;
else if (!strcmp(str, "on"))
bhi_mitigation = BHI_MITIGATION_ON;
else if (!strcmp(str, "auto"))
bhi_mitigation = BHI_MITIGATION_AUTO;
else
pr_err("Ignoring unknown spectre_bhi option (%s)", str);

@@ -1658,9 +1652,11 @@ static void __init bhi_select_mitigation(void)
return;

/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
!(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
return;
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
spec_ctrl_disable_kernel_rrsba();
if (rrsba_disabled)
return;
}

if (spec_ctrl_bhi_dis())
return;

@@ -1672,9 +1668,6 @@ static void __init bhi_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");

if (bhi_mitigation == BHI_MITIGATION_AUTO)
return;

/* Mitigate syscalls when the mitigation is forced =on */
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");

@@ -1908,8 +1901,6 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();

/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.

@@ -1924,7 +1915,7 @@ static void update_mds_branch_idle(void)
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(ia32_cap & ARCH_CAP_FBSDP_NO)) {
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}

@@ -2809,7 +2800,7 @@ static char *pbrsb_eibrs_state(void)
}
}

static const char * const spectre_bhi_state(void)
static const char *spectre_bhi_state(void)
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";

@@ -2817,13 +2808,12 @@ static const char * const spectre_bhi_state(void)
return "; BHI: BHI_DIS_S";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
return "; BHI: Retpoline";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Syscall hardening, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";

return "; BHI: Vulnerable (Syscall hardening enabled)";
return "; BHI: Vulnerable";
}

static ssize_t spectre_v2_show_state(char *buf)
@ -1284,25 +1284,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
|
||||
|
||||
u64 x86_read_arch_cap_msr(void)
|
||||
{
|
||||
u64 ia32_cap = 0;
|
||||
u64 x86_arch_cap_msr = 0;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
|
||||
|
||||
return ia32_cap;
|
||||
return x86_arch_cap_msr;
|
||||
}
|
||||
|
||||
static bool arch_cap_mmio_immune(u64 ia32_cap)
|
||||
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-		ia32_cap & ARCH_CAP_PSDP_NO &&
-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
 	/* The "immunity" bit trumps everything else: */
-	if (ia32_cap & ARCH_CAP_RFDS_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
 		return false;
 
 	/*
@@ -1310,7 +1310,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 	 * indicate that mitigation is needed because guest is running on a
 	 * vulnerable hardware or may migrate to such hardware:
 	 */
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
 		return true;
 
 	/* Only consult the blacklist when there is no enumeration: */
@@ -1319,11 +1319,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
+	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1335,7 +1335,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1346,17 +1346,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
 	 * userspace indirect branch performance.
 	 */
-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
 		setup_force_cpu_bug(X86_BUG_MDS);
 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1375,9 +1375,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
 	 * update is not present or running as guest that don't get TSX_CTRL.
 	 */
-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
 	    (cpu_has(c, X86_FEATURE_RTM) ||
-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
 		setup_force_cpu_bug(X86_BUG_TAA);
 
 	/*
@@ -1403,7 +1403,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (!arch_cap_mmio_immune(ia32_cap)) {
+	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1411,7 +1411,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
@@ -1429,15 +1429,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
 	 * which means that AVX will be disabled.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (vulnerable_to_rfds(ia32_cap))
+	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1447,7 +1447,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		return;
 
 	/* Rogue Data Cache Load? No! */
-	if (ia32_cap & ARCH_CAP_RDCL_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
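The hunks above are a mechanical rename of the cached IA32_ARCH_CAPABILITIES value (ia32_cap becomes x86_arch_cap_msr); the underlying pattern is unchanged: read the MSR once, then test individual *_NO "immunity" bits against the cached copy. A minimal user-space sketch of that bit-test pattern, with illustrative bit positions that are not the real MSR layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bits only -- not the real IA32_ARCH_CAPABILITIES layout. */
#define CAP_RDCL_NO	(1ULL << 0)	/* CPU reports immunity to Rogue Data Cache Load */
#define CAP_MDS_NO	(1ULL << 5)	/* CPU reports immunity to MDS */

static bool vulnerable_to_mds(uint64_t cap_msr)
{
	/* A set *_NO bit means the CPU reports immunity to that issue. */
	return !(cap_msr & CAP_MDS_NO);
}

int main(void)
{
	uint64_t cap_msr = CAP_RDCL_NO;	/* pretend-value, read once at boot */

	printf("MDS vulnerable: %d\n", vulnerable_to_mds(cap_msr));
	return 0;
}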
@@ -123,7 +123,6 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
 	early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
 	early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
 #endif
-	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
 }
 
@@ -210,7 +209,11 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
 		topo_info.nr_disabled_cpus++;
 	}
 
-	/* Register present and possible CPUs in the domain maps */
+	/*
+	 * Register present and possible CPUs in the domain
+	 * maps. cpu_possible_map will be updated in
+	 * topology_init_possible_cpus() after enumeration is done.
+	 */
 	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++)
 		set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
 }
@@ -29,11 +29,21 @@ static bool parse_8000_0008(struct topo_scan *tscan)
 	if (!sft)
 		sft = get_count_order(ecx.cpu_nthreads + 1);
 
-	topology_set_dom(tscan, TOPO_SMT_DOMAIN, sft, ecx.cpu_nthreads + 1);
+	/*
+	 * cpu_nthreads describes the number of threads in the package
+	 * sft is the number of APIC ID bits per package
+	 *
+	 * As the number of actual threads per core is not described in
+	 * this leaf, just set the CORE domain shift and let the later
+	 * parsers set SMT shift. Assume one thread per core by default
+	 * which is correct if there are no other CPUID leafs to parse.
+	 */
+	topology_update_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
+	topology_set_dom(tscan, TOPO_CORE_DOMAIN, sft, ecx.cpu_nthreads + 1);
 	return true;
 }
 
-static void store_node(struct topo_scan *tscan, unsigned int nr_nodes, u16 node_id)
+static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id)
 {
 	/*
 	 * Starting with Fam 17h the DIE domain could probably be used to
@@ -73,12 +83,14 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
 	tscan->c->topo.initial_apicid = leaf.ext_apic_id;
 
 	/*
-	 * If leaf 0xb is available, then SMT shift is set already. If not
-	 * take it from ecx.threads_per_core and use topo_update_dom() -
-	 * topology_set_dom() would propagate and overwrite the already
-	 * propagated CORE level.
+	 * If leaf 0xb is available, then the domain shifts are set
+	 * already and nothing to do here.
 	 */
 	if (!has_0xb) {
+		/*
+		 * Leaf 0x80000008 set the CORE domain shift already.
+		 * Update the SMT domain, but do not propagate it.
+		 */
 		unsigned int nthreads = leaf.core_nthreads + 1;
 
 		topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
@@ -109,13 +121,13 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
 
 static bool parse_fam10h_node_id(struct topo_scan *tscan)
 {
-	struct {
-		union {
+	union {
+		struct {
 			u64	node_id		:  3,
 				nodes_per_pkg	:  3,
 				unused		: 58;
-			u64	msr;
 		};
+		u64	msr;
 	} nid;
 
 	if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
@@ -135,6 +147,26 @@ static void legacy_set_llc(struct topo_scan *tscan)
 	tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
 }
 
+static void topoext_fixup(struct topo_scan *tscan)
+{
+	struct cpuinfo_x86 *c = tscan->c;
+	u64 msrval;
+
+	/* Try to re-enable TopologyExtensions if switched off by BIOS */
+	if (cpu_has(c, X86_FEATURE_TOPOEXT) || c->x86_vendor != X86_VENDOR_AMD ||
+	    c->x86 != 0x15 || c->x86_model < 0x10 || c->x86_model > 0x6f)
+		return;
+
+	if (msr_set_bit(0xc0011005, 54) <= 0)
+		return;
+
+	rdmsrl(0xc0011005, msrval);
+	if (msrval & BIT_64(54)) {
+		set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+		pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+	}
+}
+
 static void parse_topology_amd(struct topo_scan *tscan)
 {
 	bool has_0xb = false;
@@ -164,6 +196,7 @@ static void parse_topology_amd(struct topo_scan *tscan)
 void cpu_parse_topology_amd(struct topo_scan *tscan)
 {
 	tscan->amd_nodes_per_pkg = 1;
+	topoext_fixup(tscan);
 	parse_topology_amd(tscan);
 
 	if (tscan->amd_nodes_per_pkg > 1)
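The shifts being juggled above are APIC ID bit widths: get_count_order(n) yields the smallest s with (1 << s) >= n, i.e. ceil(log2(n)). A stand-alone sketch of that helper (user-space stand-in for the kernel function, same semantics):

#include <stdio.h>

/* Smallest s such that (1 << s) >= n, i.e. ceil(log2(n)). */
static int count_order(unsigned int n)
{
	int s = 0;

	while ((1U << s) < n)
		s++;
	return s;
}

int main(void)
{
	/* 12 threads per package need 4 APIC ID bits: 1 << 4 = 16 >= 12 */
	printf("order(12) = %d\n", count_order(12));
	return 0;
}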
@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
 	return 0;
 }
 
+void blkg_init_queue(struct request_queue *q)
+{
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
+}
+
 int blkcg_init_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&q->blkg_list);
-	mutex_init(&q->blkcg_mutex);
-
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
 		return -ENOMEM;
@@ -189,6 +189,7 @@ struct blkcg_policy {
 extern struct blkcg blkcg_root;
 extern bool blkcg_debug_stats;
 
+void blkg_init_queue(struct request_queue *q);
 int blkcg_init_disk(struct gendisk *disk);
 void blkcg_exit_disk(struct gendisk *disk);
 
@@ -482,6 +483,7 @@ struct blkcg {
 };
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
 static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
 static inline void blkcg_exit_disk(struct gendisk *disk) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	init_waitqueue_head(&q->mq_freeze_wq);
 	mutex_init(&q->mq_freeze_lock);
 
+	blkg_init_queue(q);
+
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
@@ -1195,6 +1197,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 
+	plug->cur_ktime = 0;
 	current->flags &= ~PF_BLOCK_TS;
 }
@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
-	u64 tdelta, delay, new_delay;
+	u64 tdelta, delay, new_delay, shift;
 	s64 vover, vover_pct;
 	u32 hwa;
 
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 
 	/* calculate the current delay in effect - 1/2 every second */
 	tdelta = now->now - iocg->delay_at;
-	if (iocg->delay)
-		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+	shift = div64_u64(tdelta, USEC_PER_SEC);
+	if (iocg->delay && shift < BITS_PER_LONG)
+		delay = iocg->delay >> shift;
 	else
 		delay = 0;
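The point of the iocost fix above: shifting a value by its full bit width or more is undefined behavior in C, so a shift count derived from elapsed time must be clamped and treated as "decayed to zero". A minimal, self-contained sketch of that guard:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_U64 64

/* Halve `delay` once per elapsed second; a shift of 64+ is UB, so clamp it. */
static uint64_t decay_delay(uint64_t delay, uint64_t elapsed_usec)
{
	uint64_t shift = elapsed_usec / 1000000;

	if (delay && shift < BITS_PER_U64)
		return delay >> shift;
	return 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)decay_delay(1024, 3000000));	/* 128 */
	printf("%llu\n", (unsigned long long)decay_delay(1024, 90ULL * 1000000)); /* 0, no UB */
	return 0;
}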
@@ -182,17 +182,13 @@ static int blk_validate_limits(struct queue_limits *lim)
 		return -EINVAL;
 
 	/*
-	 * Devices that require a virtual boundary do not support scatter/gather
-	 * I/O natively, but instead require a descriptor list entry for each
-	 * page (which might not be identical to the Linux PAGE_SIZE). Because
-	 * of that they are not limited by our notion of "segment size".
+	 * Stacking device may have both virtual boundary and max segment
+	 * size limit, so allow this setting now, and long-term the two
+	 * might need to move out of stacking limits since we have immutable
+	 * bvec and lower layer bio splitting is supposed to handle the two
+	 * correctly.
 	 */
-	if (lim->virt_boundary_mask) {
-		if (WARN_ON_ONCE(lim->max_segment_size &&
-				 lim->max_segment_size != UINT_MAX))
-			return -EINVAL;
-		lim->max_segment_size = UINT_MAX;
-	} else {
+	if (!lim->virt_boundary_mask) {
 		/*
 		 * The maximum segment size has an odd historic 64k default that
 		 * drivers probably should override. Just like the I/O size we
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
 	return 0;
 }
 
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
-	int ret;
-
-	ret = ivpu_rpm_get_if_active(vdev);
-	if (ret < 0)
-		return ret;
-
-	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
-	if (ret)
-		ivpu_rpm_put(vdev);
-
-	return 0;
-}
-
 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 		args->value = vdev->platform;
 		break;
 	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
-		ret = ivpu_get_core_clock_rate(vdev, &args->value);
+		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
 		break;
 	case DRM_IVPU_PARAM_NUM_CONTEXTS:
 		args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
 {
 	int ret;
 
-	ivpu_prepare_for_reset(vdev);
+	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+	pci_save_state(to_pci_dev(vdev->drm.dev));
 
 	ret = ivpu_hw_power_down(vdev);
 	if (ret)
 		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
 
+	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
 	return ret;
 }
 
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
 	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
 	atomic64_set(&vdev->unique_id_counter, 0);
-	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
 	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
 	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	/* Power up early so the rest of init code can access VPU registers */
 	ret = ivpu_hw_power_up(vdev);
 	if (ret)
-		goto err_power_down;
+		goto err_shutdown;
 
 	ret = ivpu_mmu_global_context_init(vdev);
 	if (ret)
-		goto err_power_down;
+		goto err_shutdown;
 
 	ret = ivpu_mmu_init(vdev);
 	if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
 	ivpu_mmu_reserved_context_fini(vdev);
 err_mmu_gctx_fini:
 	ivpu_mmu_global_context_fini(vdev);
-err_power_down:
-	ivpu_hw_power_down(vdev);
-	if (IVPU_WA(d3hot_after_power_off))
-		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+	ivpu_shutdown(vdev);
 err_xa_destroy:
 	xa_destroy(&vdev->db_xa);
 	xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
 	ivpu_pm_disable(vdev);
+	ivpu_prepare_for_reset(vdev);
 	ivpu_shutdown(vdev);
-	if (IVPU_WA(d3hot_after_power_off))
-		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 
 	ivpu_jobs_abort_all(vdev);
 	ivpu_job_done_consumer_fini(vdev);
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
 struct ivpu_wa_table {
 	bool punit_disabled;
 	bool clear_runtime_mem;
-	bool d3hot_after_power_off;
 	bool interrupt_clear_with_0;
 	bool disable_clock_relinquish;
 	bool disable_d0i3_msg;
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
 	u32 (*profiling_freq_get)(struct ivpu_device *vdev);
 	void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
 	u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+	u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
 	u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
 	u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
 	u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
 	return vdev->hw->ops->reg_pll_freq_get(vdev);
 };
 
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+	return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
 static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
 {
 	return vdev->hw->ops->reg_telemetry_offset_get(vdev);
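The new ratio_to_freq op follows the pattern already used throughout this header: a per-generation function-pointer table behind a thin inline wrapper, so callers never care which hardware generation they run on. A generic, compilable sketch of that dispatch pattern, with hypothetical names and an illustrative conversion factor:

#include <stdio.h>

struct hw_ops {
	unsigned int (*ratio_to_freq)(unsigned int ratio);
};

/* One implementation per hardware generation. */
static unsigned int gen_a_ratio_to_freq(unsigned int ratio)
{
	return ratio * 50;	/* illustrative PLL reference multiple */
}

static const struct hw_ops gen_a_ops = {
	.ratio_to_freq = gen_a_ratio_to_freq,
};

/* Thin wrapper so callers never touch the ops table directly. */
static unsigned int hw_ratio_to_freq(const struct hw_ops *ops, unsigned int ratio)
{
	return ops->ratio_to_freq(ratio);
}

int main(void)
{
	printf("%u\n", hw_ratio_to_freq(&gen_a_ops, 30));
	return 0;
}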
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 {
 	vdev->wa.punit_disabled = false;
 	vdev->wa.clear_runtime_mem = false;
-	vdev->wa.d3hot_after_power_off = true;
 
 	REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
 	if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 
 	IVPU_PRINT_WA(punit_disabled);
 	IVPU_PRINT_WA(clear_runtime_mem);
-	IVPU_PRINT_WA(d3hot_after_power_off);
 	IVPU_PRINT_WA(interrupt_clear_with_0);
 }
 
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
 	/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
 }
 
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
 {
 	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
 	u32 cpu_clock;
 
-	if ((config & 0xff) == PLL_RATIO_4_3)
+	if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
 		cpu_clock = pll_clock * 2 / 4;
 	else
 		cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
 	if (!ivpu_is_silicon(vdev))
 		return PLL_SIMULATION_FREQ;
 
-	return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+	return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
 }
 
 static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
 	.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
 	.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
 	.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+	.ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
 	.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
 	.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
 	.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
 	return PLL_RATIO_TO_FREQ(pll_curr_ratio);
 }
 
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+	return PLL_RATIO_TO_FREQ(ratio);
+}
+
 static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
 {
 	return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
 	.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
 	.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
 	.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+	.ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
 	.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
 	.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
 	.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
 	spin_lock_init(&ipc->cons_lock);
 	INIT_LIST_HEAD(&ipc->cons_list);
 	INIT_LIST_HEAD(&ipc->cb_msg_list);
-	drmm_mutex_init(&vdev->drm, &ipc->lock);
+	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+	if (ret) {
+		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+		goto err_free_rx;
+	}
 	ivpu_ipc_reset(vdev);
 	return 0;
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
 	case IVPU_MMU_EVT_F_VMS_FETCH:
 		return "Fetch of VMS caused external abort";
 	default:
-		return "Unknown CMDQ command";
+		return "Unknown event";
 	}
 }
 
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
 {
 	switch (err) {
 	case IVPU_MMU_CERROR_NONE:
-		return "No CMDQ Error";
+		return "No error";
 	case IVPU_MMU_CERROR_ILL:
 		return "Illegal command";
 	case IVPU_MMU_CERROR_ABT:
-		return "External abort on CMDQ read";
+		return "External abort on command queue read";
 	case IVPU_MMU_CERROR_ATC_INV_SYNC:
 		return "Sync failed to complete ATS invalidation";
 	default:
-		return "Unknown CMDQ Error";
+		return "Unknown error";
 	}
 }
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
 {
 	int ret;
 
-	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
-	pci_save_state(to_pci_dev(vdev->drm.dev));
+	ivpu_prepare_for_reset(vdev);
 
 	ret = ivpu_shutdown(vdev);
 	if (ret)
-		ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
-	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+		ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
 
 	return ret;
 }
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
 {
 	int ret;
 
-	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
-	pci_restore_state(to_pci_dev(vdev->drm.dev));
-
 retry:
+	pci_restore_state(to_pci_dev(vdev->drm.dev));
+	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+
 	ret = ivpu_hw_power_up(vdev);
 	if (ret) {
 		ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
 	ivpu_mmu_disable(vdev);
 err_power_down:
 	ivpu_hw_power_down(vdev);
+	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 
 	if (!ivpu_fw_is_cold_boot(vdev)) {
 		ivpu_pm_prepare_cold_boot(vdev);
@@ -1843,7 +1843,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 			if (dep->honor_dep)
 				adev->flags.honor_deps = 1;
 
-			adev->dep_unmet++;
+			if (!dep->met)
+				adev->dep_unmet++;
 		}
 	}
 }
@@ -666,6 +666,87 @@ static int mobile_lpm_policy = -1;
 module_param(mobile_lpm_policy, int, 0644);
 MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+		 "32-bits port map masks to ignore controllers ports. "
+		 "Valid values are: "
+		 "\"<mask>\" to apply the same mask to all AHCI controller "
+		 "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+		 "specify different masks for the controllers specified, "
+		 "where <pci_dev> is the PCI ID of an AHCI controller in the "
+		 "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+				     struct ahci_host_priv *hpriv, char *mask_s)
+{
+	unsigned int mask;
+
+	if (kstrtouint(mask_s, 0, &mask)) {
+		dev_err(dev, "Invalid port map mask\n");
+		return;
+	}
+
+	hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+				   struct ahci_host_priv *hpriv)
+{
+	char *param, *end, *str, *mask_s;
+	char *name;
+
+	if (!strlen(ahci_mask_port_map))
+		return;
+
+	str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+	if (!str)
+		return;
+
+	/* Handle single mask case */
+	if (!strchr(str, '=')) {
+		ahci_apply_port_map_mask(dev, hpriv, str);
+		goto free;
+	}
+
+	/*
+	 * Mask list case: parse the parameter to apply the mask only if
+	 * the device name matches.
+	 */
+	param = str;
+	end = param + strlen(param);
+	while (param && param < end && *param) {
+		name = param;
+		param = strchr(name, '=');
+		if (!param)
+			break;
+
+		*param = '\0';
+		param++;
+		if (param >= end)
+			break;
+
+		if (strcmp(dev_name(dev), name) != 0) {
+			param = strchr(param, ',');
+			if (param)
+				param++;
+			continue;
+		}
+
+		mask_s = param;
+		param = strchr(mask_s, ',');
+		if (param) {
+			*param = '\0';
+			param++;
+		}
+
+		ahci_apply_port_map_mask(dev, hpriv, mask_s);
+	}
+
+free:
+	kfree(str);
+}
+
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 					 struct ahci_host_priv *hpriv)
 {
@@ -688,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 			"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
 	}
 
+	/* Handle port map masks passed as module parameter. */
+	if (ahci_mask_port_map)
+		ahci_get_port_map_mask(&pdev->dev, hpriv);
+
 	ahci_save_initial_config(&pdev->dev, hpriv);
}
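Going by the MODULE_PARM_DESC text added above, usage would look like the following (the PCI IDs here are hypothetical examples, not taken from the patch):

	modprobe ahci mask_port_map=0x1
	modprobe ahci mask_port_map=0000:00:17.0=0x1,0000:01:00.0=0x3

The first form applies one mask to every AHCI controller; the second applies per-controller masks keyed by the domain:bus:dev.func PCI ID that the parser above matches against dev_name().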
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
 	bool cdl_enabled;
 	u64 val;
 
-	if (ata_id_major_version(dev->id) < 12)
+	if (ata_id_major_version(dev->id) < 11)
 		goto not_supported;
 
 	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 			 * bail out.
 			 */
 			if (ap->pflags & ATA_PFLAG_SUSPENDED)
-				goto unlock;
+				goto unlock_ap;
 
 			if (!sdev)
 				continue;
@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 			if (do_resume) {
 				ret = scsi_resume_device(sdev);
 				if (ret == -EWOULDBLOCK)
-					goto unlock;
+					goto unlock_scan;
 				dev->flags &= ~ATA_DFLAG_RESUMING;
 			}
 			ret = scsi_rescan_device(sdev);
@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 			spin_lock_irqsave(ap->lock, flags);
 
 			if (ret)
-				goto unlock;
+				goto unlock_ap;
 		}
 	}
 
-unlock:
+unlock_ap:
 	spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
 	mutex_unlock(&ap->scsi_scan_mutex);
 
 	/* Reschedule with a delay if scsi_rescan_device() returned an error */
drivers/cache/sifive_ccache.c
@@ -15,6 +15,8 @@
 #include <linux/of_address.h>
 #include <linux/device.h>
 #include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <asm/cacheflush.h>
 #include <asm/cacheinfo.h>
 #include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
 	return IRQ_HANDLED;
 }
 
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	unsigned long quirks;
+	int intr_num, rc;
+
+	quirks = (unsigned long)device_get_match_data(dev);
+
+	intr_num = platform_irq_count(pdev);
+	if (!intr_num)
+		return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+	for (int i = 0; i < intr_num; i++) {
+		if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+			continue;
+
+		g_irq[i] = platform_get_irq(pdev, i);
+		if (g_irq[i] < 0)
+			return g_irq[i];
+
+		rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+		if (rc)
+			return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+	}
+
+	return 0;
+}
+
+static struct platform_driver sifive_ccache_driver = {
+	.probe = sifive_ccache_probe,
+	.driver = {
+		.name = "sifive_ccache",
+		.of_match_table = sifive_ccache_ids,
+	},
+};
+
 static int __init sifive_ccache_init(void)
 {
 	struct device_node *np;
 	struct resource res;
-	int i, rc, intr_num;
 	const struct of_device_id *match;
 	unsigned long quirks;
+	int rc;
 
 	np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
 	if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
 		goto err_unmap;
 	}
 
-	intr_num = of_property_count_u32_elems(np, "interrupts");
-	if (!intr_num) {
-		pr_err("No interrupts property\n");
-		rc = -ENODEV;
-		goto err_unmap;
-	}
-
-	for (i = 0; i < intr_num; i++) {
-		g_irq[i] = irq_of_parse_and_map(np, i);
-
-		if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
-			continue;
-
-		rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
-				 NULL);
-		if (rc) {
-			pr_err("Could not request IRQ %d\n", g_irq[i]);
-			goto err_free_irq;
-		}
-	}
-	of_node_put(np);
-
 #ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
 	if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
 		riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
 #ifdef CONFIG_DEBUG_FS
 	setup_sifive_debug();
 #endif
+
+	rc = platform_driver_register(&sifive_ccache_driver);
+	if (rc)
+		goto err_unmap;
+
+	of_node_put(np);
+
 	return 0;
 
-err_free_irq:
-	while (--i >= 0)
-		free_irq(g_irq[i], NULL);
 err_unmap:
 	iounmap(ccache_base);
 err_node_put:
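The probe conversion above leans on devm_request_irq() and dev_err_probe() so error paths no longer need the manual free_irq() unwinding that the old init code carried. dev_err_probe() logs and returns the error code in one expression; a user-space analogue of that shape, with stand-in names:

#include <stdio.h>

/* Analogue of dev_err_probe(): log once and hand back the error code
 * in a single expression, keeping error paths one-liners. */
static int err_probe(int err, const char *msg)
{
	fprintf(stderr, "probe: %s (%d)\n", msg, err);
	return err;
}

static int fake_probe(int irq_count)
{
	if (!irq_count)
		return err_probe(-19, "No interrupts property"); /* -ENODEV */
	return 0;
}

int main(void)
{
	return fake_probe(0) ? 1 : 0;
}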
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
 
 static void __cold _credit_init_bits(size_t bits)
 {
-	static struct execute_work set_ready;
+	static DECLARE_WORK(set_ready, crng_set_ready);
 	unsigned int new, orig, add;
 	unsigned long flags;
 
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
 
 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
 		crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-		if (static_key_initialized)
-			execute_in_process_context(crng_set_ready, &set_ready);
+		if (static_key_initialized && system_unbound_wq)
+			queue_work(system_unbound_wq, &set_ready);
 		atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
 		wake_up_interruptible(&crng_init_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
 
 	/*
 	 * If we were initialized by the cpu or bootloader before jump labels
-	 * are initialized, then we should enable the static branch here, where
-	 * it's guaranteed that jump labels have been initialized.
+	 * or workqueues are initialized, then we should enable the static
+	 * branch here, where it's guaranteed that these have been initialized.
 	 */
 	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
 		crng_set_ready(NULL);
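The random.c change above only queues the work item once both jump labels and the unbound workqueue exist; before that point, random_init() enables the static branch directly. A user-space sketch of that "defer until the subsystem is ready" shape, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

static bool workqueue_ready;	/* stand-in for system_unbound_wq != NULL */

static void crng_set_ready(void)
{
	puts("static branch enabled");
}

static void credit_bits(void)
{
	if (workqueue_ready)
		crng_set_ready();	/* would be queue_work() in the kernel */
	else
		puts("too early, the init hook will catch up later");
}

int main(void)
{
	credit_bits();		/* deferred */
	workqueue_ready = true;
	credit_bits();		/* runs */
	return 0;
}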
@@ -525,22 +525,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
 {
 	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
 	u32 uid;
-	int rc;
 
 	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
 		return -EINVAL;
 
-	rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
-	if (rc < 0)
-		return rc;
-
-	/* Adjust back to picoseconds from nanoseconds */
-	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
-		dport->hb_coord[i].read_latency *= 1000;
-		dport->hb_coord[i].write_latency *= 1000;
-	}
-
-	return 0;
+	return acpi_get_genport_coordinates(uid, dport->coord);
 }
 
 static int add_host_bridge_dport(struct device *match, void *arg)
@@ -14,12 +14,42 @@
 struct dsmas_entry {
 	struct range dpa_range;
 	u8 handle;
-	struct access_coordinate coord;
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 
 	int entries;
 	int qos_class;
 };
 
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+	u32 value;
+
+	/*
+	 * Check for invalid and overflow values
+	 */
+	if (entry == 0xffff || !entry)
+		return 0;
+	else if (base > (UINT_MAX / (entry)))
+		return 0;
+
+	/*
+	 * CDAT fields follow the format of HMAT fields. See table 5 Device
+	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
+	 * Attribute Table (CDAT) Specification v1.01.
+	 */
+	value = entry * base;
+	switch (type) {
+	case ACPI_HMAT_ACCESS_LATENCY:
+	case ACPI_HMAT_READ_LATENCY:
+	case ACPI_HMAT_WRITE_LATENCY:
+		value = DIV_ROUND_UP(value, 1000);
+		break;
+	default:
+		break;
+	}
+	return value;
+}
+
 static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
 			      const unsigned long end)
 {
@@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
 	return 0;
 }
 
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
-				      int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+					int access, unsigned int val)
 {
 	switch (access) {
 	case ACPI_HMAT_ACCESS_LATENCY:
@@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
 	}
 }
 
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+				      int access, unsigned int val)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		__cxl_access_coordinate_set(&coord[i], access, val);
+}
+
 static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 			       const unsigned long end)
 {
@@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 	__le16 le_val;
 	u64 val;
 	u16 len;
-	int rc;
 
 	len = le16_to_cpu((__force __le16)hdr->length);
 	if (len != size || (unsigned long)hdr + len > end) {
@@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 
 	le_base = (__force __le64)dslbis->entry_base_unit;
 	le_val = (__force __le16)dslbis->entry[0];
-	rc = check_mul_overflow(le64_to_cpu(le_base),
-				le16_to_cpu(le_val), &val);
-	if (rc)
-		pr_warn("DSLBIS value overflowed.\n");
+	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+			     dslbis->data_type);
 
-	cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+	cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
 
 	return 0;
 }
@@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
 static int cxl_port_perf_data_calculate(struct cxl_port *port,
 					struct xarray *dsmas_xa)
 {
-	struct access_coordinate ep_c;
-	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
 	struct dsmas_entry *dent;
 	int valid_entries = 0;
 	unsigned long index;
 	int rc;
 
-	rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
 	if (rc) {
 		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
 		return rc;
 	}
 
-	rc = cxl_hb_get_perf_coordinates(port, coord);
-	if (rc) {
-		dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
-		return rc;
-	}
-
 	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
 
 	if (!cxl_root)
@@ -193,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	xa_for_each(dsmas_xa, index, dent) {
 		int qos_class;
 
-		cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
-		/*
-		 * Keeping the host bridge coordinates separate from the dsmas
-		 * coordinates in order to allow calculation of access class
-		 * 0 and 1 for region later.
-		 */
-		cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU],
-					&coord[ACCESS_COORDINATE_CPU],
-					&dent->coord);
+		cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
 		dent->entries = 1;
 		rc = cxl_root->ops->qos_class(cxl_root,
-					      &coord[ACCESS_COORDINATE_CPU],
+					      &dent->coord[ACCESS_COORDINATE_CPU],
 					      1, &qos_class);
 		if (rc != 1)
 			continue;
@@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
 			      struct cxl_dpa_perf *dpa_perf)
 {
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		dpa_perf->coord[i] = dent->coord[i];
 	dpa_perf->dpa_range = dent->dpa_range;
-	dpa_perf->coord = dent->coord;
 	dpa_perf->qos_class = dent->qos_class;
 	dev_dbg(dev,
 		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
 		dent->dpa_range.start, dpa_perf->qos_class,
-		dent->coord.read_bandwidth, dent->coord.write_bandwidth,
-		dent->coord.read_latency, dent->coord.write_latency);
+		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
 }
 
 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
 
 		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
 		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
-		if (check_mul_overflow(le64_to_cpu(le_base),
-				       le16_to_cpu(le_val), &val))
-			dev_warn(dev, "SSLBIS value overflowed!\n");
+		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+				     sslbis->data_type);
 
 		xa_for_each(&port->dports, index, dport) {
 			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
-			    dsp_id == dport->port_id)
-				cxl_access_coordinate_set(&dport->sw_coord,
+			    dsp_id == dport->port_id) {
+				cxl_access_coordinate_set(dport->coord,
 							  sslbis->data_type,
 							  val);
+			}
 		}
 	}
 
@@ -493,16 +514,9 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
 
-/**
- * cxl_coordinates_combine - Combine the two input coordinates
- *
- * @out: Output coordinate of c1 and c2 combined
- * @c1: input coordinates
- * @c2: input coordinates
- */
-void cxl_coordinates_combine(struct access_coordinate *out,
-			     struct access_coordinate *c1,
-			     struct access_coordinate *c2)
+static void __cxl_coordinates_combine(struct access_coordinate *out,
				      struct access_coordinate *c1,
				      struct access_coordinate *c2)
 {
 	if (c1->write_bandwidth && c2->write_bandwidth)
 		out->write_bandwidth = min(c1->write_bandwidth,
@@ -515,23 +529,34 @@ void cxl_coordinates_combine(struct access_coordinate *out,
 	out->read_latency = c1->read_latency + c2->read_latency;
 }
 
+/**
+ * cxl_coordinates_combine - Combine the two input coordinates
+ *
+ * @out: Output coordinate of c1 and c2 combined
+ * @c1: input coordinates
+ * @c2: input coordinates
+ */
+void cxl_coordinates_combine(struct access_coordinate *out,
+			     struct access_coordinate *c1,
+			     struct access_coordinate *c2)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
+}
+
 MODULE_IMPORT_NS(CXL);
 
 void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
 				    struct cxl_endpoint_decoder *cxled)
 {
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-	struct cxl_port *port = cxlmd->endpoint;
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-	struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
-	struct access_coordinate coord;
 	struct range dpa = {
 		.start = cxled->dpa_res->start,
 		.end = cxled->dpa_res->end,
 	};
 	struct cxl_dpa_perf *perf;
-	int rc;
 
 	switch (cxlr->mode) {
 	case CXL_DECODER_RAM:
@@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
 	if (!range_contains(&perf->dpa_range, &dpa))
 		return;
 
-	rc = cxl_hb_get_perf_coordinates(port, hb_coord);
-	if (rc) {
-		dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
-		return;
-	}
-
 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
-		/* Pickup the host bridge coords */
-		cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord);
-
 		/* Get total bandwidth and the worst latency for the cxl region */
 		cxlr->coord[i].read_latency = max_t(unsigned int,
 						    cxlr->coord[i].read_latency,
-						    coord.read_latency);
+						    perf->coord[i].read_latency);
 		cxlr->coord[i].write_latency = max_t(unsigned int,
 						     cxlr->coord[i].write_latency,
-						     coord.write_latency);
-		cxlr->coord[i].read_bandwidth += coord.read_bandwidth;
-		cxlr->coord[i].write_bandwidth += coord.write_bandwidth;
-
-		/*
-		 * Convert latency to nanosec from picosec to be consistent
-		 * with the resulting latency coordinates computed by the
-		 * HMAT_REPORTING code.
-		 */
-		cxlr->coord[i].read_latency =
-			DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000);
-		cxlr->coord[i].write_latency =
-			DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000);
+						     perf->coord[i].write_latency);
		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
 	}
 }
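The new cdat_normalize() above replaces the check_mul_overflow() calls with a divide-based guard: a 64-bit base times a 16-bit entry fits in 32 bits only if base <= UINT_MAX / entry, so the check is done before multiplying. A stand-alone sketch of that guard:

#include <stdint.h>
#include <stdio.h>

/* Returns entry * base, or 0 if the entry is absent/invalid (0 or 0xffff)
 * or the product would not fit in 32 bits, mirroring cdat_normalize(). */
static uint32_t checked_scale(uint16_t entry, uint64_t base)
{
	if (entry == 0xffff || !entry)
		return 0;
	if (base > UINT32_MAX / entry)
		return 0;
	return entry * base;	/* guaranteed to fit after the guard */
}

int main(void)
{
	printf("%u\n", checked_scale(100, 1000));	/* 100000 */
	printf("%u\n", checked_scale(0xfffe, 1 << 20)); /* 0: would overflow */
	return 0;
}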
@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
 
 		payload->handles[i++] = gen->hdr.handle;
 		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
-			le16_to_cpu(payload->handles[i]));
+			le16_to_cpu(payload->handles[i - 1]));
 
 		if (i == max_handles) {
 			payload->nr_recs = i;
@@ -958,13 +958,14 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
 		.payload_in = &log_type,
 		.size_in = sizeof(log_type),
 		.payload_out = payload,
-		.size_out = mds->payload_size,
 		.min_out = struct_size(payload, records, 0),
 	};
 
 	do {
 		int rc, i;
 
+		mbox_cmd.size_out = mds->payload_size;
+
 		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 		if (rc) {
 			dev_err_ratelimited(dev,
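The first hunk above is a classic post-increment slip: after payload->handles[i++] = ..., the element just stored lives at index i - 1, so logging handles[i] reads one slot past it. A minimal illustration:

#include <stdio.h>

int main(void)
{
	int handles[4] = { 0 };
	int i = 0;

	handles[i++] = 42;

	/* Wrong: i already advanced, this reads the next (empty) slot. */
	printf("buggy read:   %d\n", handles[i]);
	/* Right: the element that was just stored. */
	printf("correct read: %d\n", handles[i - 1]);
	return 0;
}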
@@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
 
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- *				 and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-				struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
 {
-	struct cxl_port *iter = port;
-	struct cxl_dport *dport;
-
-	if (!is_cxl_endpoint(port))
-		return -EINVAL;
-
-	dport = iter->parent_dport;
-	while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
-		iter = to_cxl_port(iter->dev.parent);
-		dport = iter->parent_dport;
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		c[i].write_latency += latency;
+		c[i].read_latency += latency;
 	}
+}
 
-	coord[ACCESS_COORDINATE_LOCAL] =
-		dport->hb_coord[ACCESS_COORDINATE_LOCAL];
-	coord[ACCESS_COORDINATE_CPU] =
-		dport->hb_coord[ACCESS_COORDINATE_CPU];
+static bool coordinates_valid(struct access_coordinate *c)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		if (c[i].read_bandwidth && c[i].write_bandwidth &&
+		    c[i].read_latency && c[i].write_latency)
+			continue;
+		return false;
+	}
 
-	return 0;
+	return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+		c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
+	}
+}
+
+static void set_access_coordinates(struct access_coordinate *out,
+				   struct access_coordinate *in)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		out[i] = in[i];
+}
+
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+	return is_cxl_root(to_cxl_port(port->dev.parent));
+}
 
 /**
@@ -2176,35 +2184,53 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port,
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 				      struct access_coordinate *coord)
 {
-	struct access_coordinate c = {
-		.read_bandwidth = UINT_MAX,
-		.write_bandwidth = UINT_MAX,
+	struct access_coordinate c[] = {
+		{
+			.read_bandwidth = UINT_MAX,
+			.write_bandwidth = UINT_MAX,
+		},
+		{
+			.read_bandwidth = UINT_MAX,
+			.write_bandwidth = UINT_MAX,
+		},
 	};
 	struct cxl_port *iter = port;
 	struct cxl_dport *dport;
 	struct pci_dev *pdev;
 	unsigned int bw;
+	bool is_cxl_root;
 
 	if (!is_cxl_endpoint(port))
 		return -EINVAL;
 
-	dport = iter->parent_dport;
-
 	/*
-	 * Exit the loop when the parent port of the current port is cxl root.
-	 * The iterative loop starts at the endpoint and gathers the
-	 * latency of the CXL link from the current iter to the next downstream
-	 * port each iteration. If the parent is cxl root then there is
-	 * nothing to gather.
+	 * Exit the loop when the parent port of the current iter port is cxl
+	 * root. The iterative loop starts at the endpoint and gathers the
+	 * latency of the CXL link from the current device/port to the connected
+	 * downstream port each iteration.
 	 */
-	while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
-		cxl_coordinates_combine(&c, &c, &dport->sw_coord);
-		c.write_latency += dport->link_latency;
-		c.read_latency += dport->link_latency;
-
-		iter = to_cxl_port(iter->dev.parent);
-		dport = iter->parent_dport;
-	}
+	do {
+		dport = iter->parent_dport;
+		iter = to_cxl_port(iter->dev.parent);
+		is_cxl_root = parent_port_is_cxl_root(iter);
+
+		/*
+		 * There's no valid access_coordinate for a root port since RPs do not
+		 * have CDAT and therefore needs to be skipped.
+		 */
+		if (!is_cxl_root) {
+			if (!coordinates_valid(dport->coord))
+				return -EINVAL;
+			cxl_coordinates_combine(c, c, dport->coord);
+		}
+		add_latency(c, dport->link_latency);
+	} while (!is_cxl_root);
+
+	dport = iter->parent_dport;
+	/* Retrieve HB coords */
+	if (!coordinates_valid(dport->coord))
+		return -EINVAL;
+	cxl_coordinates_combine(c, c, dport->coord);
 
 	/* Get the calculated PCI paths bandwidth */
 	pdev = to_pci_dev(port->uport_dev->parent);
@@ -2213,10 +2239,8 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 		return -ENXIO;
 	bw /= BITS_PER_BYTE;
 
-	c.write_bandwidth = min(c.write_bandwidth, bw);
-	c.read_bandwidth = min(c.read_bandwidth, bw);
-
-	*coord = c;
+	set_min_bandwidth(c, bw);
+	set_access_coordinates(coord, c);
 
 	return 0;
 }
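The reworked path walk above folds per-hop values with a simple rule: latency adds up along the path, while bandwidth is the running minimum (the slowest hop limits the whole path), starting from UINT_MAX as the identity. A compact sketch of that combine rule:

#include <stdio.h>

struct coord {
	unsigned int bw;	/* path bandwidth: limited by the slowest hop */
	unsigned int lat;	/* path latency: sum of all hops */
};

static void combine(struct coord *out, const struct coord *hop)
{
	out->bw = out->bw < hop->bw ? out->bw : hop->bw;
	out->lat += hop->lat;
}

int main(void)
{
	struct coord path = { .bw = ~0u, .lat = 0 };	/* identity element */
	struct coord hops[] = { { 16000, 200 }, { 8000, 150 } };

	for (unsigned int i = 0; i < 2; i++)
		combine(&path, &hops[i]);
	printf("bw=%u lat=%u\n", path.bw, path.lat);	/* bw=8000 lat=350 */
	return 0;
}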
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
 static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
 				struct cxl_register_map *map)
 {
+	u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
 	int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
 	u64 offset = ((u64)reg_hi << 32) |
 		     (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
 	if (offset > pci_resource_len(pdev, bar)) {
 		dev_warn(&pdev->dev,
 			 "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
-			 &pdev->resource[bar], &offset, map->reg_type);
+			 &pdev->resource[bar], &offset, reg_type);
 		return false;
 	}
 
-	map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+	map->reg_type = reg_type;
 	map->resource = pci_resource_start(pdev, bar) + offset;
 	map->max_size = pci_resource_len(pdev, bar) - offset;
 	return true;
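The fix above extracts the register type into a local up front, because the error path used to log map->reg_type before it was assigned. FIELD_GET() itself pulls a value out of a register under a mask; a stand-alone equivalent (my own macro, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

/* User-space equivalent of the kernel's FIELD_GET(): mask the register,
 * then shift down by the mask's lowest set bit. */
#define MY_FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

#define BLOCK_ID_MASK 0x0000ff00u	/* illustrative mask, not the real layout */

int main(void)
{
	uint32_t reg_lo = 0x00003407;

	/* (0x3407 & 0xff00) >> 8 = 0x34 = 52 */
	printf("reg_type = %u\n", (unsigned int)MY_FIELD_GET(BLOCK_ID_MASK, reg_lo));
	return 0;
}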
@@ -663,8 +663,7 @@ struct cxl_rcrb_info {
  * @rch: Indicate whether this dport was enumerated in RCH or VH mode
  * @port: reference to cxl_port that contains this downstream port
  * @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
  * @link_latency: calculated PCIe downstream latency
  */
 struct cxl_dport {
@@ -675,8 +674,7 @@ struct cxl_dport {
 	bool rch;
 	struct cxl_port *port;
 	struct cxl_regs regs;
-	struct access_coordinate sw_coord;
-	struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 	long link_latency;
 };
@@ -884,8 +882,6 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
 
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 				      struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-				struct access_coordinate *coord);
 void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
 				    struct cxl_endpoint_decoder *cxled);
@@ -401,7 +401,7 @@ enum cxl_devtype {
  */
 struct cxl_dpa_perf {
 	struct range dpa_range;
-	struct access_coordinate coord;
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 	int qos_class;
 };
@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
 
 			part_id = packed_id_list[ids_processed++];
 
-			if (!ids_count[list]) { /* Global Notification */
+			if (ids_count[list] == 1) { /* Global Notification */
 				__do_sched_recv_cb(part_id, 0, false);
 				continue;
 			}
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
 	ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
 				   POWERCAP_PAI_GET, 4, domain,
 				   &fc[POWERCAP_FC_PAI].get_addr, NULL,
-				   &fc[POWERCAP_PAI_GET].rate_limit);
+				   &fc[POWERCAP_FC_PAI].rate_limit);
 
 	*p_fc = fc;
 }
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
 	rd->raw = raw;
 	filp->private_data = rd;
 
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
 	.open = scmi_dbg_raw_mode_open,
 	.release = scmi_dbg_raw_mode_release,
 	.write = scmi_dbg_raw_mode_reset_write,
+	.llseek = no_llseek,
 	.owner = THIS_MODULE,
 };
 
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
 	.read = scmi_dbg_raw_mode_message_read,
 	.write = scmi_dbg_raw_mode_message_write,
 	.poll = scmi_dbg_raw_mode_message_poll,
+	.llseek = no_llseek,
 	.owner = THIS_MODULE,
 };
 
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
 	.read = scmi_dbg_raw_mode_message_read,
 	.write = scmi_dbg_raw_mode_message_async_write,
 	.poll = scmi_dbg_raw_mode_message_poll,
+	.llseek = no_llseek,
 	.owner = THIS_MODULE,
 };
 
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
 	.release = scmi_dbg_raw_mode_release,
 	.read = scmi_test_dbg_raw_mode_notif_read,
 	.poll = scmi_test_dbg_raw_mode_notif_poll,
+	.llseek = no_llseek,
 	.owner = THIS_MODULE,
 };
 
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
 	.release = scmi_dbg_raw_mode_release,
 	.read = scmi_test_dbg_raw_mode_errors_read,
 	.poll = scmi_test_dbg_raw_mode_errors_poll,
+	.llseek = no_llseek,
 	.owner = THIS_MODULE,
 };
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
 	case 0x5e:
 		return GPIOPANELCTL;
 	default:
-		return -EOPNOTSUPP;
+		return -ENOTSUPP;
 	}
 }
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
 	{ .compatible = "nxp,lpc3220-gpio", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
 
 static struct platform_driver lpc32xx_gpio_driver = {
 	.driver = {
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
 	unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
 
 	if (gpio >= WCOVE_GPIO_NUM)
-		return -EOPNOTSUPP;
+		return -ENOTSUPP;
 
 	return reg + gpio;
 }
@@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
 extern int amdgpu_discovery;
 extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
 extern int amdgpu_mes_kiq;
 extern int amdgpu_noretry;
 extern int amdgpu_force_asic_type;
@@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 					adev->ip_blocks[i].status.hw = true;
 				}
 			}
+		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+			   !amdgpu_device_has_display_hardware(adev)) {
+			r = psp_gpu_reset(adev);
 		} else {
-			tmp = amdgpu_reset_method;
-			/* It should do a default reset when loading or reloading the driver,
-			 * regardless of the module parameter reset_method.
-			 */
-			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-			r = amdgpu_asic_reset(adev);
-			amdgpu_reset_method = tmp;
-			if (r) {
-				dev_err(adev->dev, "asic reset on init failed\n");
-				goto failed;
-			}
+			tmp = amdgpu_reset_method;
+			/* It should do a default reset when loading or reloading the driver,
+			 * regardless of the module parameter reset_method.
+			 */
+			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+			r = amdgpu_asic_reset(adev);
+			amdgpu_reset_method = tmp;
+		}
+
+		if (r) {
+			dev_err(adev->dev, "asic reset on init failed\n");
+			goto failed;
 		}
 	}
@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes,
	"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);

/**
 * DOC: mes_log_enable (int)
 * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
 * (0 = disabled (default), 1 = enabled)
 */
MODULE_PARM_DESC(mes_log_enable,
	"Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);

/**
 * DOC: mes_kiq (int)
 * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
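
The hunk above wires up a new integer module parameter through the standard three-step pattern: a global variable, a module_param_named() registration, and a MODULE_PARM_DESC() string (plus a DOC: block for the generated documentation). A self-contained sketch of the same pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0 = disabled (default), 1 = enabled */
static int example_log_enable;

/* Exposed as /sys/module/<module>/parameters/log_enable; mode 0444 means
 * world-readable but not changeable after module load. */
module_param_named(log_enable, example_log_enable, int, 0444);
MODULE_PARM_DESC(log_enable,
		 "Enable example internal log (0 = disabled (default), 1 = enabled)");
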
@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
		dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
			ring->name);
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
			dev_err(adev->dev,
				"Error scheduling IBs (%d) in ring(%s)", r,
				ring->name);
	}

	job->job_run_counter++;
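
This hunk trades the global DRM_INFO()/DRM_ERROR() macros for device-aware dev_dbg()/dev_err() calls that also name the ring. A short sketch of the difference; `adev` and `ring` stand in for the driver's real objects:

static void example_log_failure(struct amdgpu_device *adev,
				struct amdgpu_ring *ring, int r)
{
	/* global macro: every GPU in the system logs an identical, anonymous line */
	DRM_ERROR("Error scheduling IBs (%d)\n", r);

	/* device-aware: prefixed with the driver name and PCI address, so
	 * multi-GPU logs identify which device failed; dev_dbg() is also
	 * compiled out unless dynamic debug enables it */
	dev_err(adev->dev, "Error scheduling IBs (%d) in ring(%s)", r, ring->name);
}
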
@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
@ -1549,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, PAGE_SIZE, false);
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif
@ -1565,7 +1567,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	if (adev->enable_mes)
	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
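
The show callback above streams the whole log buffer through seq_hex_dump(). A minimal, self-contained sketch of that debugfs pattern; the `example_` names and buffer size are assumptions for illustration:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define EXAMPLE_LOG_SIZE 0x4000

static int example_log_show(struct seq_file *m, void *unused)
{
	void *log = m->private;	/* CPU mapping of the log buffer */

	/* 32 bytes per row, grouped as 4-byte words, offsets as prefix */
	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     log, EXAMPLE_LOG_SIZE, false);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_log);

/* registration (the buffer pointer becomes m->private via i_private):
 * debugfs_create_file("example_log", 0444, root, buf, &example_log_fops); */
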
@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {

#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */

struct amdgpu_mes_funcs;
@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
		if (aqua_vanjaram_xcp_vcn_shared(adev))
			inst_mask = 1 << (inst_idx * 2);
		break;
	default:
@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN is shared by two partitions under CPX MODE */
		/* VCN may be shared by two partitions under CPX MODE in certain
		 * configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}
@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	active_rb_bitmap &= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
	 * changed in any way.
	 */
	ring->set_q_mode_offs = 0;
	ring->set_q_mode_ptr = NULL;
}
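
The first hunk is a one-character bug fix: OR-ing the per-SA render-backend bits with the global harvesting bitmap could count fused-off RBs as active, while AND keeps only RBs present in both sets. A worked example of the difference (the bit values are illustrative, not from real hardware):

#include <linux/bitops.h>

static unsigned int count_active_rbs(void)
{
	u32 configured = 0xF;	/* 4 RBs wired up per the SA layout */
	u32 harvested = 0xB;	/* fuses disabled the RB at bit 2 */

	/* configured | harvested == 0xF -> 4 RBs (wrong, counts a dead RB)
	 * configured & harvested == 0xB -> 3 RBs (correct) */
	return hweight32(configured & harvested);
}
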
@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
	mes_set_hw_res_pkt.oversubscription_timer = 50;
	mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
	mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
	if (amdgpu_mes_log_enable) {
		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
			mes->event_log_gpu_addr;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
	u32 sdma_cntl;

	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
					  DRAM_ECC_INT_ENABLE, 0);
		WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
		break;
	/* sdma ecc interrupt is enabled by default
	 * driver doesn't need to do anything to
	 * enable the interrupt */
	case AMDGPU_IRQ_STATE_ENABLE:
	default:
		break;
	}
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);

	return 0;
}
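
The rewrite collapses the switch into one read-modify-write that programs the enable bit in both directions. Conceptually, REG_SET_FIELD() is the usual mask-and-shift field update; a hedged sketch using hypothetical EXAMPLE_FIELD names rather than the real register macros:

#include <linux/types.h>

#define EXAMPLE_FIELD__SHIFT	6
#define EXAMPLE_FIELD_MASK	(1u << 6)

/* roughly what REG_SET_FIELD(val, REG, FIELD, en) expands to */
static u32 example_set_field(u32 val, u32 en)
{
	val &= ~EXAMPLE_FIELD_MASK;				/* clear the field */
	val |= (en << EXAMPLE_FIELD__SHIFT) & EXAMPLE_FIELD_MASK;	/* insert new value */
	return val;
}
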
@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		return false;
	default:
		return true;
	}
@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_GFX_PG;
		adev->external_rev_id = adev->rev_id + 0x1;
		if (adev->rev_id == 0)
			adev->external_rev_id = 0x1;
		else
			adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 5, 1):
		adev->cg_flags =
@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
	return soc21_common_hw_fini(adev);
}

static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
{
	u32 sol_reg1, sol_reg2;

	/* Will reset for the following suspend abort cases.
	 * 1) Only reset dGPU side.
	 * 2) S3 suspend got aborted and TOS is active.
	 */
	if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
	    !adev->suspend_complete) {
		sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
		msleep(100);
		sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);

		return (sol_reg1 != sol_reg2);
	}

	return false;
}

static int soc21_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (soc21_need_reset_on_resume(adev)) {
		dev_info(adev->dev, "S3 suspend aborted, resetting...");
		soc21_asic_reset(adev);
	}

	return soc21_common_hw_init(adev);
}
@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);

	ring->wptr = 0;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
		      args->num_of_nodes), GFP_KERNEL);
	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
		     GFP_KERNEL);
	if (!pa)
		return -ENOMEM;
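
The switch from kzalloc() to kcalloc() matters because `num_of_nodes` comes from userspace: kcalloc() fails cleanly when `n * size` would overflow, whereas the open-coded multiplication could wrap and under-allocate. A hedged sketch with a hypothetical struct:

#include <linux/slab.h>

struct example_aperture {
	u64 base;
	u64 limit;
};

static struct example_aperture *alloc_apertures(u32 n)
{
	/* kcalloc(n, size, ...) returns NULL if n * size overflows,
	 * instead of silently handing back a too-small zeroed buffer */
	return kcalloc(n, sizeof(struct example_aperture), GFP_KERNEL);
}
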
@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	struct kfd_node *node;
	int i;
	int count;

	if (!kfd->init_complete)
		return;
@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		mutex_lock(&kfd_processes_mutex);
		count = ++kfd_locked;
		mutex_unlock(&kfd_processes_mutex);

		/* For first KFD device suspend all the KFD processes */
		if (count == 1)
		if (++kfd_locked == 1)
			kfd_suspend_all_processes();
		mutex_unlock(&kfd_processes_mutex);
	}

	for (i = 0; i < kfd->num_nodes; i++) {
@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count, i;
	int ret, i;

	if (!kfd->init_complete)
		return 0;
@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		mutex_lock(&kfd_processes_mutex);
		count = --kfd_locked;
		mutex_unlock(&kfd_processes_mutex);

		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
		if (--kfd_locked == 0)
			ret = kfd_resume_all_processes();
		WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
		mutex_unlock(&kfd_processes_mutex);
	}

	return ret;
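
Both hunks close a race: the old code dropped `kfd_processes_mutex` between updating `kfd_locked` and acting on the snapshot, so two devices suspending or resuming concurrently could each act on a stale count. The fix keeps the test-and-act inside one critical section. A reduced sketch of the pattern, with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_locked;	/* suspended-device refcount, guarded by example_lock */

static void example_suspend_all_processes(void) { /* ... */ }

static void example_suspend_one(void)
{
	mutex_lock(&example_lock);
	/* increment and decision happen under one lock: no stale snapshot */
	if (++example_locked == 1)
		example_suspend_all_processes();	/* first suspender acts */
	mutex_unlock(&example_lock);
}
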
@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
		dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
		while (halt_if_hws_hang)
			schedule();
		kfd_hws_hang(dqm);
		return -ETIME;
	}
@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

@ -3044,6 +3047,10 @@ static int dm_resume(void *handle)
	/* Do mst topology probing after resuming cached state*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
@ -4820,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
		break;
	case IP_VERSION(3, 5, 0):
	case IP_VERSION(3, 5, 1):
		fw_name_dmub = FIRMWARE_DCN_35_DMUB;
		break;
	case IP_VERSION(3, 5, 1):
		fw_name_dmub = FIRMWARE_DCN_351_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
@ -5921,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		return NULL;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

@ -6306,19 +6318,16 @@ create_stream_for_sink(struct drm_connector *connector,
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
	    stream->signal == SIGNAL_TYPE_EDP) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
						      stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;

		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@ -8762,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
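
Several of the hunks above add the same guard: skip DRM_MODE_CONNECTOR_WRITEBACK connectors before casting with to_amdgpu_dm_connector(), since writeback connectors are not backed by an amdgpu_dm_connector. A sketch of the full iteration protocol, including the drm_connector_list_iter_end() that the hunks' context elides:

#include <drm/drm_connector.h>

static void example_walk_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;	/* no amdgpu_dm state behind this one */

		/* safe to use to_amdgpu_dm_connector(connector) here */
	}
	drm_connector_list_iter_end(&iter);	/* must pair with _begin() */
}
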
@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,

static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	return drm_add_modes_noedid(connector, dev->mode_config.max_width,
				    dev->mode_config.max_height);
	/* Maximum resolution supported by DWB */
	return drm_add_modes_noedid(connector, 3840, 2160);
}

static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
	return display_count;
}

static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
				  bool safe_to_lower, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe = safe_to_lower
			? &context->res_ctx.pipe_ctx[i]
			: &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;
		if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
				     dc_is_virtual_signal(pipe->stream->signal))) {
		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
				     !pipe->stream->link_enc)) {
			if (disable) {
				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
				if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
					pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);

				reset_sync_context_for_pipe(dc, context, i);
			} else
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn316_disable_otg_wa(clk_mgr_base, context, true);
		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn316_disable_otg_wa(clk_mgr_base, context, false);
		dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);

		update_dispclk = true;
	}
@ -73,6 +73,12 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L

#define regCLK5_0_CLK5_spll_field_8 0x464b
#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0

#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L

#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0

#define REG(reg_name) \
@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}

static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_context *ctx = clk_mgr->base.ctx;
	uint32_t ssc_enable;

	REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);

	return ssc_enable == 1;
}

static void init_clk_states(struct clk_mgr *clk_mgr)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)

void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
	init_clk_states(clk_mgr);

	// adjust the DP DTO reference clock if SSC is enabled, otherwise use dprefclk
	if (dcn35_is_spll_ssc_enabled(clk_mgr))
		clk_mgr->dp_dto_source_clock_in_khz =
			dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
	else
		clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;

}

static struct clk_bw_params dcn35_bw_params = {
	.vram_type = Ddr4MemType,
@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;

static struct dcn35_watermarks dummy_wms = { 0 };

static struct dcn35_ss_info_table ss_info_table = {
	.ss_divider = 1000,
	.ss_percentage = {0, 0, 375, 375, 375}
};

static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
	struct dc_context *ctx = clk_mgr->base.ctx;
	uint32_t clock_source;

	REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
	// If it's DFS mode, clock_source is 0.
	if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
		clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];

		if (clk_mgr->dprefclk_ss_percentage != 0) {
			clk_mgr->ss_on_dprefclk = true;
			clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
		}
	}
}

static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
	int i, num_valid_sets;
@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct(
	dce_clock_read_ss_info(&clk_mgr->base);
	/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/

	dcn35_read_ss_info_from_lut(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn35_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
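
dcn35_is_spll_ssc_enabled() above reads one bit through DC's REG_GET machinery. For readers unfamiliar with that macro, a hedged sketch of the equivalent explicit decode using the mask/shift #defines added in the same hunk; `read_reg()` is a hypothetical stand-in for the register accessor:

#include <linux/types.h>

#define regCLK5_0_CLK5_spll_field_8 0x464b
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L

uint32_t read_reg(uint32_t reg);	/* hypothetical accessor */

static bool example_spll_ssc_enabled(void)
{
	uint32_t val = read_reg(regCLK5_0_CLK5_spll_field_8);

	/* isolate bit 13 and shift it down: 1 means SSC is enabled */
	return ((val & CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK) >>
		CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT) == 1;
}
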
@ -436,6 +436,15 @@ bool dc_state_add_plane(
		goto out;
	}

	if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
		/* ODM combine could prevent us from supporting more planes
		 * we will reset ODM slice count back to 1 when all planes have
		 * been removed to maximize the amount of planes supported when
		 * new planes are added.
		 */
		resource_update_pipes_for_stream_with_slice_count(
			state, dc->current_state, dc->res_pool, stream, 1);

	otg_master_pipe = resource_get_otg_master_for_stream(
			&state->res_ctx, stream);
	if (otg_master_pipe)
@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
	struct bp_pixel_clock_parameters bp_pc_params = {0};
	enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;

	if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
	// Apply the SS'ed (spread spectrum) dpref clock for eDP only.
	if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
	    && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
	    && encoding == DP_8b_10b_ENCODING)
		dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
	// For these signal types Driver to program DP_DTO without calling VBIOS Command table
	if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
	unsigned int modulo_hz = 0;
	unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;

	if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
		dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;

	if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
		clock_hz = REG_READ(PHASE[inst]);
@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
			OTG_V_TOTAL_MAX_SEL, 1,
			OTG_FORCE_LOCK_ON_EVENT, 0,
			OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */

		// Setup manual flow control for EOF via TRIG_A
		optc->funcs->setup_manual_trigger(optc);
	}
}
@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
	return 0;
}

static int smu_reset_mp1_state(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if ((!adev->in_runpm) && (!adev->in_suspend) &&
	    (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
	    IP_VERSION(13, 0, 10) &&
	    !amdgpu_device_has_display_hardware(adev))
		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);

	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;
@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	ret = smu_reset_mp1_state(smu);
	if (ret)
		return ret;

	return 0;
}

static void smu_late_fini(void *handle)
@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
	SMU_BACO_STATE_NONE,
};

struct smu_baco_context {
@ -144,6 +144,37 @@ typedef struct {
	uint32_t MaxGfxClk;
} DpmClocks_t;

//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
	uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
	uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
	uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
	uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
	uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
	uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
	uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
	uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
	uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
	uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
	uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
	uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
	MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];

	uint8_t NumDcfClkLevelsEnabled;
	uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
	uint8_t NumSocClkLevelsEnabled;
	uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
	uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
	uint8_t VpeClkLevelsEnabled;
	uint8_t NumMemPstatesEnabled;
	uint8_t NumFclkLevelsEnabled;
	uint8_t spare;

	uint32_t MinGfxClk;
	uint32_t MaxGfxClk;
} DpmClocks_t_v14_0_1;

typedef struct {
	uint16_t CoreFrequency[16]; //Target core frequency [MHz]
	uint16_t CorePower[16]; //CAC calculated core power [mW]
@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
#define TABLE_SPARE0 5 // Unused
#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8
@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
#define FEATURE_VCN_DPM_BIT 10
#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
#define FEATURE_SPARE0_BIT 26 //SPARE
#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
#define FEATURE_SMART_L3_RINSER_BIT 34
#define FEATURE_SPARE1_BIT 35 //SPARE
#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
#define FEATURE_DS_VCN_BIT 45
#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
#define FEATURE_SPARE2_BIT 49 //SPARE
#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
#define FEATURE_SPARE_61 61
#define FEATURE_FP_DIDT 62
#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE */
#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT; v14_0_1 PCC_BIT */
#define NUM_FEATURES 63

// Firmware Header/Footer
@ -151,6 +151,43 @@ typedef struct {
	// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;

typedef struct {
	// MP1_EXT_SCRATCH0
	uint32_t DpmHandlerID : 8;
	uint32_t ActivityMonitorID : 8;
	uint32_t DpmTimerID : 8;
	uint32_t DpmHubID : 4;
	uint32_t DpmHubTask : 4;
	// MP1_EXT_SCRATCH1
	uint32_t CclkSyncStatus : 8;
	uint32_t ZstateStatus : 4;
	uint32_t Cpu1VddOff : 4;
	uint32_t DstateFun : 4;
	uint32_t DstateDev : 4;
	uint32_t GfxOffStatus : 2;
	uint32_t Cpu0Off : 2;
	uint32_t Cpu1Off : 2;
	uint32_t Cpu0VddOff : 2;
	// MP1_EXT_SCRATCH2
	uint32_t P2JobHandler :32;
	// MP1_EXT_SCRATCH3
	uint32_t PostCode :32;
	// MP1_EXT_SCRATCH4
	uint32_t MsgPortBusy :15;
	uint32_t RsmuPmiP1Pending : 1;
	uint32_t RsmuPmiP2PendingCnt : 8;
	uint32_t DfCstateExitPending : 1;
	uint32_t Pc6EntryPending : 1;
	uint32_t Pc6ExitPending : 1;
	uint32_t WarmResetPending : 1;
	uint32_t Mp0ClkPending : 1;
	uint32_t InWhisperMode : 1;
	uint32_t spare2 : 2;
	// MP1_EXT_SCRATCH5
	uint32_t IdleMask :32;
	// MP1_EXT_SCRATCH6 = RTOS threads' status
	// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t_v14_0_1;

#pragma pack(pop)
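
FwStatus_t_v14_0_1 above relies on each comment-delimited group of bitfields packing into exactly one 32-bit scratch register (8+8+8+4+4 = 32 for MP1_EXT_SCRATCH0, and so on). A small compile-time check in the same spirit; the struct name is hypothetical and this is an editor's sketch, not something the header does:

#include <linux/build_bug.h>
#include <linux/types.h>

struct scratch0_layout {
	uint32_t DpmHandlerID      : 8;
	uint32_t ActivityMonitorID : 8;
	uint32_t DpmTimerID        : 8;
	uint32_t DpmHubID          : 4;
	uint32_t DpmHubTask        : 4;	/* 8+8+8+4+4 = 32 bits */
};

/* fails to compile if the fields stop mapping 1:1 onto MP1_EXT_SCRATCH0 */
static_assert(sizeof(struct scratch0_layout) == sizeof(uint32_t),
	      "scratch0_layout must be exactly one 32-bit register");
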
@ -72,23 +72,19 @@
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)

#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU

#define PPSMC_MSG_spare_0x17 0x17
#define PPSMC_MSG_spare_0x18 0x18
#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK

#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
#define PPSMC_MSG_spare_0x20 0x20
#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default

#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
@ -99,8 +95,8 @@
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/

/**
@ -27,6 +27,7 @@

#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1

#define FEATURE_MASK(feature) (1ULL << feature)
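
FEATURE_MASK() turns the FEATURE_*_BIT indices from the pmfw header into testable bits in the 64-bit enabled-features word. A small usage sketch; the `enabled_features` parameter is a hypothetical stand-in for the value the driver reads back from the SMU:

#include <linux/types.h>

#define FEATURE_MASK(feature) (1ULL << feature)
#define FEATURE_VCN_DPM_BIT 10	/* from the pmfw header above */

static bool vcn_dpm_enabled(u64 enabled_features)
{
	/* bit 10 set means VCN DPM is on (covers VCN0 and VCN1 on v14_0_1) */
	return enabled_features & FEATURE_MASK(FEATURE_VCN_DPM_BIT);
}
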
@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		ret = smu_cmn_set_mp1_state(smu, mp1_state);
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_PrepareMp1ForUnload,
						      0x55, NULL);

		if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
			ret = smu_v13_0_disable_pmfw_state(smu);

		break;
	default:
		/* Ignore others */
@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
	if (!en && !adev->in_s0ix) {
		/* Add a GFX reset as a workaround just before sending the
		 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
		 * an invalid state.
		 */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
						      SMU_RESET_MODE_2, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
	}

	return ret;
}