Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

drivers/net/ethernet/broadcom/bnxt/bnxt.h
  c948c0973d ("bnxt_en: Don't clear ntuple filters and rss contexts during ethtool ops")
  f2878cdeb7 ("bnxt_en: Add support to call FW to update a VNIC")

Link: https://patch.msgid.link/20240822210125.1542769-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 761d527d5d
@@ -562,7 +562,8 @@ Description: Control Symmetric Multi Threading (SMT)
 ================ =========================================

 If control status is "forceoff" or "notsupported" writes
-are rejected.
+are rejected. Note that enabling SMT on PowerPC skips
+offline cores.

 What: /sys/devices/system/cpu/cpuX/power/energy_perf_bias
 Date: March 2019
@@ -162,13 +162,14 @@ iv_large_sectors


 Module parameters::
-max_read_size
-max_write_size
-   Maximum size of read or write requests. When a request larger than this size
-   is received, dm-crypt will split the request. The splitting improves
-   concurrency (the split requests could be encrypted in parallel by multiple
-   cores), but it also causes overhead. The user should tune these parameters to
-   fit the actual workload.
+
+   max_read_size
+   max_write_size
+      Maximum size of read or write requests. When a request larger than this size
+      is received, dm-crypt will split the request. The splitting improves
+      concurrency (the split requests could be encrypted in parallel by multiple
+      cores), but it also causes overhead. The user should tune these parameters to
+      fit the actual workload.


 Example scripts
@@ -239,25 +239,33 @@ The following keys are defined:
   ratified in commit 98918c844281 ("Merge pull request #1217 from
   riscv/zawrs") of riscv-isa-manual.

-* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
-  information about the selected set of processors.
+* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: Deprecated. Returns similar values to
+  :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF`, but the key was
+  mistakenly classified as a bitmask rather than a value.

-* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned
-  accesses is unknown.
+* :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF`: An enum value describing
+  the performance of misaligned scalar native word accesses on the selected set
+  of processors.

-* :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are
-  emulated via software, either in or below the kernel. These accesses are
-  always extremely slow.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN`: The performance of
+  misaligned scalar accesses is unknown.

-* :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower
-  than equivalent byte accesses. Misaligned accesses may be supported
-  directly in hardware, or trapped and emulated by software.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED`: Misaligned scalar
+  accesses are emulated via software, either in or below the kernel. These
+  accesses are always extremely slow.

-* :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster
-  than equivalent byte accesses.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW`: Misaligned scalar native
+  word sized accesses are slower than the equivalent quantity of byte
+  accesses. Misaligned accesses may be supported directly in hardware, or
+  trapped and emulated by software.

-* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
-  not supported at all and will generate a misaligned address fault.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SCALAR_FAST`: Misaligned scalar native
+  word sized accesses are faster than the equivalent quantity of byte
+  accesses.
+
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED`: Misaligned scalar
+  accesses are not supported at all and will generate a misaligned address
+  fault.

 * :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which
   represents the size of the Zicboz block in bytes.
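A minimal userspace sketch of probing the renamed key follows. The struct layout and constants mirror the uapi header hunk later in this diff; the raw syscall number 258 and the all-CPUs calling convention (cpusetsize 0, cpus NULL, flags 0) are assumptions here, so prefer __NR_riscv_hwprobe from your RISC-V toolchain's <asm/unistd.h> when available::

   /* Query misaligned scalar access performance on RISC-V via hwprobe. */
   #include <stdio.h>
   #include <stdint.h>
   #include <unistd.h>
   #include <sys/syscall.h>

   struct riscv_hwprobe { int64_t key; uint64_t value; };

   #define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9
   #define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST     3

   int main(void)
   {
           struct riscv_hwprobe pair = {
                   .key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
           };

           /* cpusetsize == 0 and cpus == NULL ask about all online CPUs. */
           if (syscall(258 /* __NR_riscv_hwprobe on riscv64; an assumption */,
                       &pair, 1, 0, NULL, 0))
                   return 1;

           printf("misaligned scalar perf: %llu%s\n",
                  (unsigned long long)pair.value,
                  pair.value == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST ?
                  " (fast)" : "");
           return 0;
   }

On older kernels the deprecated RISCV_HWPROBE_KEY_CPUPERF_0 key returns similar values, as the documentation text above notes.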
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display Clock & Reset Controller on SM6350

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm display clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Global Clock & Reset Controller on MSM8994

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm global clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Global Clock & Reset Controller on SM6125

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm global clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Global Clock & Reset Controller on SM6350

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm global clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Graphics Clock & Reset Controller on SM6115

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm graphics clock control module provides clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Graphics Clock & Reset Controller on SM6125

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm graphics clock control module provides clocks and power domains on
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Camera Clock & Reset Controller on SM6350

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm camera clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Display Clock & Reset Controller on SM6375

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm display clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Global Clock & Reset Controller on SM6375

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm global clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Graphics Clock & Reset Controller on SM6375

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm graphics clock control module provides clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm SM8350 Video Clock & Reset Controller

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm video clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Graphics Clock & Reset Controller on SM8450

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm graphics clock control module provides the clocks, resets and power
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm SM6375 Display MDSS

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description:
   SM6375 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: ASUS Z00T TM5P5 NT35596 5.5" 1080×1920 LCD Panel

 maintainers:
-  - Konrad Dybcio <konradybcio@gmail.com>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |+
   This panel seems to only be found in the Asus Z00T
@@ -18,12 +18,12 @@ properties:
           # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
           - const: samsung,atna33xc20
       - items:
-        - enum:
-          # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
-          - samsung,atna45af01
-          # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
-          - samsung,atna45dc02
-        - const: samsung,atna33xc20
+          - enum:
+              # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
+              - samsung,atna45af01
+              # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
+              - samsung,atna45dc02
+          - const: samsung,atna33xc20

   enable-gpios: true
   port: true
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Sony TD4353 JDI 5 / 5.7" 2160x1080 MIPI-DSI Panel

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   The Sony TD4353 JDI is a 5 (XZ2c) / 5.7 (XZ2) inch 2160x1080
@@ -28,6 +28,7 @@ properties:
           - anvo,anv32e61w
           - atmel,at25256B
           - fujitsu,mb85rs1mt
+          - fujitsu,mb85rs256
           - fujitsu,mb85rs64
           - microchip,at25160bn
           - microchip,25lc040
@@ -8,7 +8,7 @@ title: Qualcomm RPMh Network-On-Chip Interconnect on SC7280

 maintainers:
   - Bjorn Andersson <andersson@kernel.org>
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   RPMh interconnect providers support system bandwidth requirements through
@@ -8,7 +8,7 @@ title: Qualcomm RPMh Network-On-Chip Interconnect on SC8280XP

 maintainers:
   - Bjorn Andersson <andersson@kernel.org>
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   RPMh interconnect providers support system bandwidth requirements through
@@ -8,7 +8,7 @@ title: Qualcomm RPMh Network-On-Chip Interconnect on SM8450

 maintainers:
   - Bjorn Andersson <andersson@kernel.org>
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   RPMh interconnect providers support system bandwidth requirements through
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies legacy IOMMU implementations

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   Qualcomm "B" family devices which are not compatible with arm-smmu have
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. MDM9607 TLMM block

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description:
   Top Level Mode Multiplexer pin controller in Qualcomm MDM9607 SoC.
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. SM6350 TLMM block

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description:
   Top Level Mode Multiplexer pin controller in Qualcomm SM6350 SoC.
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. SM6375 TLMM block

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@somainline.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description:
   Top Level Mode Multiplexer pin controller in Qualcomm SM6375 SoC.
@@ -8,7 +8,7 @@ title: Qualcomm Resource Power Manager (RPM) Processor/Subsystem

 maintainers:
   - Bjorn Andersson <andersson@kernel.org>
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>
   - Stephan Gerhold <stephan@gerhold.net>

 description: |
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. (QTI) RPM Master Stats

 maintainers:
-  - Konrad Dybcio <konrad.dybcio@linaro.org>
+  - Konrad Dybcio <konradybcio@kernel.org>

 description: |
   The Qualcomm RPM (Resource Power Manager) architecture includes a concept
@@ -75,7 +75,7 @@ Here are the main features of EROFS:

 - Support merging tail-end data into a special inode as fragments.

-- Support large folios for uncompressed files.
+- Support large folios to make use of THPs (Transparent Hugepages);

 - Support direct I/O on uncompressed files to avoid double caching for loop
   devices;
@@ -13,7 +13,7 @@ KSMBD architecture
 The subset of performance related operations belong in kernelspace and
 the other subset which belong to operations which are not really related with
 performance in userspace. So, DCE/RPC management that has historically resulted
-into number of buffer overflow issues and dangerous security bugs and user
+into a number of buffer overflow issues and dangerous security bugs and user
 account management are implemented in user space as ksmbd.mountd.
 File operations that are related with performance (open/read/write/close etc.)
 in kernel space (ksmbd). This also allows for easier integration with VFS
@@ -24,8 +24,8 @@ ksmbd (kernel daemon)

 When the server daemon is started, It starts up a forker thread
 (ksmbd/interface name) at initialization time and open a dedicated port 445
-for listening to SMB requests. Whenever new clients make request, Forker
-thread will accept the client connection and fork a new thread for dedicated
+for listening to SMB requests. Whenever new clients make a request, the Forker
+thread will accept the client connection and fork a new thread for a dedicated
 communication channel between the client and the server. It allows for parallel
 processing of SMB requests(commands) from clients as well as allowing for new
 clients to make new connections. Each instance is named ksmbd/1~n(port number)
@@ -34,12 +34,12 @@ thread can decide to pass through the commands to the user space (ksmbd.mountd),
 currently DCE/RPC commands are identified to be handled through the user space.
 To further utilize the linux kernel, it has been chosen to process the commands
 as workitems and to be executed in the handlers of the ksmbd-io kworker threads.
-It allows for multiplexing of the handlers as the kernel take care of initiating
+It allows for multiplexing of the handlers as the kernel takes care of initiating
 extra worker threads if the load is increased and vice versa, if the load is
-decreased it destroys the extra worker threads. So, after connection is
-established with client. Dedicated ksmbd/1..n(port number) takes complete
+decreased it destroys the extra worker threads. So, after the connection is
+established with the client. Dedicated ksmbd/1..n(port number) takes complete
 ownership of receiving/parsing of SMB commands. Each received command is worked
-in parallel i.e., There can be multiple clients commands which are worked in
+in parallel i.e., there can be multiple client commands which are worked in
 parallel. After receiving each command a separated kernel workitem is prepared
 for each command which is further queued to be handled by ksmbd-io kworkers.
 So, each SMB workitem is queued to the kworkers. This allows the benefit of load
@@ -49,9 +49,9 @@ performance by handling client commands in parallel.
 ksmbd.mountd (user space daemon)
 --------------------------------

-ksmbd.mountd is userspace process to, transfer user account and password that
+ksmbd.mountd is a userspace process to, transfer the user account and password that
 are registered using ksmbd.adduser (part of utils for user space). Further it
-allows sharing information parameters that parsed from smb.conf to ksmbd in
+allows sharing information parameters that are parsed from smb.conf to ksmbd in
 kernel. For the execution part it has a daemon which is continuously running
 and connected to the kernel interface using netlink socket, it waits for the
 requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
@@ -124,7 +124,7 @@ How to run
 1. Download ksmbd-tools(https://github.com/cifsd-team/ksmbd-tools/releases) and
    compile them.

-   - Refer README(https://github.com/cifsd-team/ksmbd-tools/blob/master/README.md)
+   - Refer to README(https://github.com/cifsd-team/ksmbd-tools/blob/master/README.md)
     to know how to use ksmbd.mountd/adduser/addshare/control utils

      $ ./autogen.sh
@@ -133,7 +133,7 @@ How to run

 2. Create /usr/local/etc/ksmbd/ksmbd.conf file, add SMB share in ksmbd.conf file.

-   - Refer ksmbd.conf.example in ksmbd-utils, See ksmbd.conf manpage
+   - Refer to ksmbd.conf.example in ksmbd-utils, See ksmbd.conf manpage
     for details to configure shares.

      $ man ksmbd.conf
@@ -145,7 +145,7 @@ How to run
      $ man ksmbd.adduser
      $ sudo ksmbd.adduser -a <Enter USERNAME for SMB share access>

-4. Insert ksmbd.ko module after build your kernel. No need to load module
+4. Insert the ksmbd.ko module after you build your kernel. No need to load the module
    if ksmbd is built into the kernel.

    - Set ksmbd in menuconfig(e.g. $ make menuconfig)
@@ -175,7 +175,7 @@ Each layer
 1. Enable all component prints
    # sudo ksmbd.control -d "all"

-2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
+2. Enable one of the components (smb, auth, vfs, oplock, ipc, conn, rdma)
    # sudo ksmbd.control -d "smb"

 3. Show what prints are enabled.
@@ -126,7 +126,7 @@ Ccache

 ``ccache`` can be used with ``clang`` to improve subsequent builds, (though
 KBUILD_BUILD_TIMESTAMP_ should be set to a deterministic value between builds
-in order to avoid 100% cache misses, see Reproducible_builds_ for more info):
+in order to avoid 100% cache misses, see Reproducible_builds_ for more info)::

	KBUILD_BUILD_TIMESTAMP='' make LLVM=1 CC="ccache clang"
MAINTAINERS
@@ -3504,7 +3504,9 @@ S: Maintained
 W: http://linux-atm.sourceforge.net
 F: drivers/atm/
 F: include/linux/atm*
+F: include/linux/sonet.h
 F: include/uapi/linux/atm*
+F: include/uapi/linux/sonet.h

 ATMEL MACB ETHERNET DRIVER
 M: Nicolas Ferre <nicolas.ferre@microchip.com>
@@ -11993,7 +11995,7 @@ F: fs/jfs/
 JME NETWORK DRIVER
 M: Guo-Fu Tseng <cooldavid@cooldavid.org>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Odd Fixes
 F: drivers/net/ethernet/jme.*

 JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
@@ -15877,15 +15879,19 @@ F: drivers/net/
 F: include/dt-bindings/net/
 F: include/linux/cn_proc.h
 F: include/linux/etherdevice.h
+F: include/linux/ethtool_netlink.h
 F: include/linux/fcdevice.h
 F: include/linux/fddidevice.h
 F: include/linux/hippidevice.h
 F: include/linux/if_*
+F: include/linux/inetdevice.h
-F: include/linux/netdevice.h
+F: include/linux/netdev*
 F: include/linux/platform_data/wiznet.h
 F: include/uapi/linux/cn_proc.h
+F: include/uapi/linux/ethtool_netlink.h
 F: include/uapi/linux/if_*
-F: include/uapi/linux/netdevice.h
+F: include/uapi/linux/netdev*
+F: tools/testing/selftests/drivers/net/
 X: drivers/net/wireless/

 NETWORKING DRIVERS (WIRELESS)
@@ -15936,14 +15942,28 @@ F: include/linux/framer/framer-provider.h
 F: include/linux/framer/framer.h
 F: include/linux/in.h
 F: include/linux/indirect_call_wrapper.h
+F: include/linux/inet.h
+F: include/linux/inet_diag.h
 F: include/linux/net.h
-F: include/linux/netdevice.h
-F: include/linux/skbuff.h
+F: include/linux/netdev*
+F: include/linux/netlink.h
+F: include/linux/netpoll.h
+F: include/linux/rtnetlink.h
+F: include/linux/seq_file_net.h
+F: include/linux/skbuff*
 F: include/net/
+F: include/uapi/linux/genetlink.h
+F: include/uapi/linux/hsr_netlink.h
 F: include/uapi/linux/in.h
+F: include/uapi/linux/inet_diag.h
+F: include/uapi/linux/nbd-netlink.h
 F: include/uapi/linux/net.h
 F: include/uapi/linux/net_namespace.h
-F: include/uapi/linux/netdevice.h
+F: include/uapi/linux/netconf.h
+F: include/uapi/linux/netdev*
+F: include/uapi/linux/netlink.h
+F: include/uapi/linux/netlink_diag.h
+F: include/uapi/linux/rtnetlink.h
 F: lib/net_utils.c
 F: lib/random32.c
 F: net/
@@ -21055,6 +21075,7 @@ SOCKET TIMESTAMPING
 M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 S: Maintained
 F: Documentation/networking/timestamping.rst
+F: include/linux/net_tstamp.h
 F: include/uapi/linux/net_tstamp.h
 F: tools/testing/selftests/net/so_txtime.c
Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -1963,7 +1963,7 @@ tags TAGS cscope gtags: FORCE
 # Protocol).
 PHONY += rust-analyzer
 rust-analyzer:
-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
+	+$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
 	$(Q)$(MAKE) $(build)=rust $@

 # Script to generate missing namespace dependencies
@@ -1980,7 +1980,7 @@ nsdeps: modules
 quiet_cmd_gen_compile_commands = GEN     $@
       cmd_gen_compile_commands = $(PYTHON3) $< -a $(AR) -o $@ $(filter-out $<, $(real-prereqs))

-$(extmod_prefix)compile_commands.json: scripts/clang-tools/gen_compile_commands.py \
+$(extmod_prefix)compile_commands.json: $(srctree)/scripts/clang-tools/gen_compile_commands.py \
	$(if $(KBUILD_EXTMOD),, vmlinux.a $(KBUILD_VMLINUX_LIBS)) \
	$(if $(CONFIG_MODULES), $(MODORDER)) FORCE
	$(call if_changed,gen_compile_commands)
@@ -1109,7 +1109,7 @@ void ecard_remove_driver(struct ecard_driver *drv)
 	driver_unregister(&drv->drv);
 }

-static int ecard_match(struct device *_dev, struct device_driver *_drv)
+static int ecard_match(struct device *_dev, const struct device_driver *_drv)
 {
 	struct expansion_card *ec = ECARD_DEV(_dev);
 	struct ecard_driver *drv = ECARD_DRV(_drv);
@@ -188,7 +188,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 #define __get_mem_asm(load, reg, x, addr, label, type)			\
 	asm_goto_output(						\
 	"1:	" load "	" reg "0, [%1]\n"			\
-	_ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0)			\
+	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
 	: "=r" (x)							\
 	: "r" (addr) : : label)
 #else
@@ -27,7 +27,7 @@

 #include <asm/numa.h>

-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

 int __init acpi_numa_get_nid(unsigned int cpu)
 {
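The fix above (repeated for riscv further down in this diff) hinges on a C initialization rule that is easy to miss: a partial array initializer zero-fills every remaining element, so { NUMA_NO_NODE } marked only CPU 0 as "no node" and silently mapped all other CPUs to node 0, while the GNU "[first ... last] =" range designator initializes every element. A standalone sketch, with NR_CPUS shrunk to 4 purely for illustration::

   /* Sketch: partial vs. range-designated array initialization. */
   #include <stdio.h>

   #define NUMA_NO_NODE (-1)
   #define NR_CPUS 4        /* shrunk for illustration */

   /* Only element 0 becomes -1; elements 1..3 are zero-filled,
    * i.e. they look like a valid node 0. */
   static int partial[NR_CPUS] = { NUMA_NO_NODE };

   /* GNU range designator (used freely in kernel code):
    * every element becomes -1. */
   static int ranged[NR_CPUS] = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

   int main(void)
   {
           for (int i = 0; i < NR_CPUS; i++)
                   printf("cpu%d: partial=%d ranged=%d\n",
                          i, partial[i], ranged[i]);
           return 0;
   }

Built with gcc, the first array prints -1, 0, 0, 0 while the second prints -1 throughout, which is exactly the behavior the patch corrects.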
@@ -355,9 +355,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();

-	/* Init percpu seeds for random tags after cpus are set up. */
-	kasan_init_sw_tags();
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Make sure init_thread_info.ttbr0 always generates translation
@@ -467,6 +467,8 @@ void __init smp_prepare_boot_cpu(void)
 	init_gic_priority_masking();

 	kasan_init_hw_tags();
+	/* Init percpu seeds for random tags after cpus are set up. */
+	kasan_init_sw_tags();
 }

 /*
@@ -111,7 +111,7 @@ void gio_device_unregister(struct gio_device *giodev)
 }
 EXPORT_SYMBOL_GPL(gio_device_unregister);

-static int gio_bus_match(struct device *dev, struct device_driver *drv)
+static int gio_bus_match(struct device *dev, const struct device_driver *drv)
 {
 	struct gio_device *gio_dev = to_gio_device(dev);
 	struct gio_driver *gio_drv = to_gio_driver(drv);
@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu)

 #ifdef CONFIG_HOTPLUG_SMT
 #include <linux/cpu_smt.h>
+#include <linux/cpumask.h>
 #include <asm/cputhreads.h>

 static inline bool topology_is_primary_thread(unsigned int cpu)
@@ -156,6 +157,18 @@ static inline bool topology_smt_thread_allowed(unsigned int cpu)
 {
 	return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
 }
+
+#define topology_is_core_online topology_is_core_online
+static inline bool topology_is_core_online(unsigned int cpu)
+{
+	int i, first_cpu = cpu_first_thread_sibling(cpu);
+
+	for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
+		if (cpu_online(i))
+			return true;
+	}
+	return false;
+}
 #endif

 #endif /* __KERNEL__ */
@@ -959,6 +959,7 @@ void __init setup_arch(char **cmdline_p)
 	mem_topology_setup();
 	/* Set max_mapnr before paging_init() */
 	set_max_mapnr(max_pfn);
+	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

 	/*
 	 * Release secondary cpus out of their spinloops at 0x60 now that
@@ -73,7 +73,7 @@ void setup_kup(void)

 #define CTOR(shift) static void ctor_##shift(void *addr) \
 { \
-	memset(addr, 0, sizeof(void *) << (shift)); \
+	memset(addr, 0, sizeof(pgd_t) << (shift)); \
 }

 CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
@@ -117,7 +117,7 @@ EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */
 void pgtable_cache_add(unsigned int shift)
 {
 	char *name;
-	unsigned long table_size = sizeof(void *) << shift;
+	unsigned long table_size = sizeof(pgd_t) << shift;
 	unsigned long align = table_size;

 	/* When batching pgtable pointers for RCU freeing, we store
@@ -290,8 +290,6 @@ void __init mem_init(void)
 	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
 #endif

-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
 	kasan_late_init();

 	memblock_free_all();
@@ -8,7 +8,7 @@

 #include <uapi/asm/hwprobe.h>

-#define RISCV_HWPROBE_MAX_KEY 8
+#define RISCV_HWPROBE_MAX_KEY 9

 static inline bool riscv_hwprobe_key_is_valid(__s64 key)
 {
@@ -82,6 +82,12 @@ struct riscv_hwprobe {
 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE	6
 #define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS	7
 #define RISCV_HWPROBE_KEY_TIME_CSR_FREQ	8
+#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF	9
+#define	RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN		0
+#define	RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED	1
+#define	RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW		2
+#define	RISCV_HWPROBE_MISALIGNED_SCALAR_FAST		3
+#define	RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED	4
 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */

 /* Flags */
@@ -28,7 +28,7 @@

 #include <asm/numa.h>

-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

 int __init acpi_numa_get_nid(unsigned int cpu)
 {
@@ -205,6 +205,8 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
 	int ret;

 	ret = patch_insn_set(addr, c, len);
+	if (!ret)
+		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

 	return ret;
 }
@@ -239,6 +241,8 @@ int patch_text_nosync(void *addr, const void *insns, size_t len)
 	int ret;

 	ret = patch_insn_write(addr, insns, len);
+	if (!ret)
+		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

 	return ret;
 }
@@ -178,13 +178,13 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
 			perf = this_perf;

 		if (perf != this_perf) {
-			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 			break;
 		}
 	}

 	if (perf == -1ULL)
-		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

 	return perf;
 }
@@ -192,12 +192,12 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
 static u64 hwprobe_misaligned(const struct cpumask *cpus)
 {
 	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
-		return RISCV_HWPROBE_MISALIGNED_FAST;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

 	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
-		return RISCV_HWPROBE_MISALIGNED_EMULATED;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

-	return RISCV_HWPROBE_MISALIGNED_SLOW;
+	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
 }
 #endif

@@ -225,6 +225,7 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
 		break;

 	case RISCV_HWPROBE_KEY_CPUPERF_0:
+	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
 		pair->value = hwprobe_misaligned(cpus);
 		break;
@@ -319,6 +319,7 @@ void do_trap_ecall_u(struct pt_regs *regs)

 		regs->epc += 4;
 		regs->orig_a0 = regs->a0;
+		regs->a0 = -ENOSYS;

 		riscv_v_vstate_discard(regs);

@@ -328,8 +329,7 @@ void do_trap_ecall_u(struct pt_regs *regs)

 		if (syscall >= 0 && syscall < NR_syscalls)
 			syscall_handler(regs, syscall);
-		else if (syscall != -1)
-			regs->a0 = -ENOSYS;
+
 		/*
 		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
 		 * so the maximum stack offset is 1k bytes (10 bits).
@@ -338,7 +338,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

 #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
-	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
 #endif

 	if (!unaligned_enabled)
@@ -532,13 +532,13 @@ static bool check_unaligned_access_emulated(int cpu)
 	unsigned long tmp_var, tmp_val;
 	bool misaligned_emu_detected;

-	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

 	__asm__ __volatile__ (
 		"       "REG_L" %[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

-	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
 	 * CPUS uses emulated misaligned access at boot time. If that changed
@@ -34,9 +34,9 @@ static int check_unaligned_access(void *param)
 	struct page *page = param;
 	void *dst;
 	void *src;
-	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+	long speed = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;

-	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
 		return 0;

 	/* Make an unaligned destination buffer. */
@@ -95,14 +95,14 @@ static int check_unaligned_access(void *param)
 	}

 	if (word_cycles < byte_cycles)
-		speed = RISCV_HWPROBE_MISALIGNED_FAST;
+		speed = RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

 	ratio = div_u64((byte_cycles * 100), word_cycles);
 	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
 		cpu,
 		ratio / 100,
 		ratio % 100,
-		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+		(speed == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST) ? "fast" : "slow");

 	per_cpu(misaligned_access_speed, cpu) = speed;

@@ -110,7 +110,7 @@ static int check_unaligned_access(void *param)
 	 * Set the value of fast_misaligned_access of a CPU. These operations
 	 * are atomic to avoid race conditions.
 	 */
-	if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+	if (speed == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST)
 		cpumask_set_cpu(cpu, &fast_misaligned_access);
 	else
 		cpumask_clear_cpu(cpu, &fast_misaligned_access);
@@ -188,7 +188,7 @@ static int riscv_online_cpu(unsigned int cpu)
 	static struct page *buf;

 	/* We are already set since the last check */
-	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
 		goto exit;

 	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
@@ -38,7 +38,7 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig
 #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
 	case ANDES_VENDOR_ID:
 		bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap;
-		cpu_bmap = &riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap[cpu];
+		cpu_bmap = riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap;
 		break;
 #endif
 	default:
@@ -927,7 +927,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
 				   PMD_SIZE, PAGE_KERNEL_EXEC);

 	/* Map the data in RAM */
-	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
+	end_va = kernel_map.virt_addr + kernel_map.size;
 	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
 		create_pgd_mapping(pgdir, va,
 				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
@@ -1096,7 +1096,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)

 	phys_ram_base = CONFIG_PHYS_RAM_BASE;
 	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
-	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);

 	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 #else
@@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int users;
+	unsigned long flags;
 	struct blk_mq_tags *tags = hctx->tags;

 	/*
@@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 		return;
 	}

-	spin_lock_irq(&tags->lock);
+	spin_lock_irqsave(&tags->lock, flags);
 	users = tags->active_queues + 1;
 	WRITE_ONCE(tags->active_queues, users);
 	blk_mq_update_wake_batch(tags, users);
-	spin_unlock_irq(&tags->lock);
+	spin_unlock_irqrestore(&tags->lock, flags);
 }

 /*
@@ -10,7 +10,6 @@
 #include <stdio.h>
 #include <libgen.h>
 #include <string.h>
-#include <linux/version.h>
 #include <ctype.h>
 #include "utils.h"

@@ -10,7 +10,6 @@
 #include <stdio.h>
 #include <libgen.h>
 #include <string.h>
-#include <linux/version.h>
 #include <ctype.h>
 #include "utils.h"
@@ -188,13 +188,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 		      u8 acpi_ns_is_locked);

 void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
 			    acpi_adr_space_type space_id, u32 function);

-void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
-				  acpi_adr_space_type space_id);
-
 acpi_status
 acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
@@ -20,6 +20,10 @@ extern u8 acpi_gbl_default_address_spaces[];

 /* Local prototypes */

+static void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+				  acpi_adr_space_type space_id);
+
 static acpi_status
 acpi_ev_reg_run(acpi_handle obj_handle,
 		u32 level, void *context, void **return_value);
@@ -61,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
 							acpi_gbl_default_address_spaces
 							[i])) {
 			acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+						    ACPI_UINT32_MAX,
 						    acpi_gbl_default_address_spaces
 						    [i], ACPI_REG_CONNECT);
 		}
@@ -668,6 +673,7 @@ cleanup1:
 * FUNCTION:    acpi_ev_execute_reg_methods
 *
 * PARAMETERS:  node            - Namespace node for the device
+ *              max_depth       - Depth to which search for _REG
 *              space_id        - The address space ID
 *              function        - Passed to _REG: On (1) or Off (0)
 *
@@ -679,7 +685,7 @@ cleanup1:
 ******************************************************************************/

 void
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
 			    acpi_adr_space_type space_id, u32 function)
 {
 	struct acpi_reg_walk_info info;
@@ -713,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 	 * regions and _REG methods. (i.e. handlers must be installed for all
 	 * regions of this Space ID before we can run any _REG methods)
 	 */
-	(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+	(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
 				     ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
 				     &info, NULL);

@@ -814,7 +820,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
 *
 ******************************************************************************/

-void
+static void
 acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
 				  acpi_adr_space_type space_id)
 {
@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
 	/* Run all _REG methods for this address space */

 	if (run_reg) {
-		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+		acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
+					    ACPI_REG_CONNECT);
 	}

 unlock_and_exit:
@@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
 * FUNCTION:    acpi_execute_reg_methods
 *
 * PARAMETERS:  device          - Handle for the device
+ *              max_depth       - Depth to which search for _REG
 *              space_id        - The address space ID
 *
 * RETURN:      Status
@@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
 *
 ******************************************************************************/
 acpi_status
-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
+			 acpi_adr_space_type space_id)
 {
 	struct acpi_namespace_node *node;
 	acpi_status status;
@@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)

 		/* Run all _REG methods for this address space */

-		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
+		acpi_ev_execute_reg_methods(node, max_depth, space_id,
+					    ACPI_REG_CONNECT);
 	} else {
 		status = AE_BAD_PARAMETER;
 	}
@@ -306,57 +310,3 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
 }

 ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_execute_orphan_reg_method
- *
- * PARAMETERS:  device          - Handle for the device
- *              space_id        - The address space ID
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
- *              device. This is a _REG method that has no corresponding region
- *              within the device's scope.
- *
- ******************************************************************************/
-acpi_status
-acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
-
-	/* Parameter validation */
-
-	if (!device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_validate_handle(device);
-	if (node) {
-
-		/*
-		 * If an "orphan" _REG method is present in the device's scope
-		 * for the given address space ID, run it.
-		 */
-
-		acpi_ev_execute_orphan_reg_method(node, space_id);
-	} else {
-		status = AE_BAD_PARAMETER;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
@@ -1487,12 +1487,13 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
 static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
 			       bool call_reg)
 {
-	acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
 	acpi_status status;

 	acpi_ec_start(ec, false);

 	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+		acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
 		acpi_ec_enter_noirq(ec);
 		status = acpi_install_address_space_handler_no_reg(scope_handle,
 								   ACPI_ADR_SPACE_EC,
@@ -1506,10 +1507,7 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
 	}

 	if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
-		acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
-		if (scope_handle != ec->handle)
-			acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
-
+		acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
 		set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
 	}

@@ -1724,6 +1722,12 @@ static void acpi_ec_remove(struct acpi_device *device)
 	}
 }

+void acpi_ec_register_opregions(struct acpi_device *adev)
+{
+	if (first_ec && first_ec->handle != adev->handle)
+		acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
+}
+
 static acpi_status
 ec_parse_io_ports(struct acpi_resource *resource, void *context)
 {
@@ -223,6 +223,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 			      acpi_handle handle, acpi_ec_query_func func,
 			      void *data);
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+void acpi_ec_register_opregions(struct acpi_device *adev);

 #ifdef CONFIG_PM_SLEEP
 void acpi_ec_flush_work(void);
@@ -2273,6 +2273,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
 	if (device->handler)
 		goto ok;

+	acpi_ec_register_opregions(device);
+
 	if (!device->flags.initialized) {
 		device->flags.power_manageable =
 			device->power.states[ACPI_STATE_D0].flags.valid;
@@ -2945,9 +2945,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 					     INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
 			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
 				&hdev->quirks);
-		if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
-			set_bit(HCI_QUIRK_VALID_LE_STATES,
-				&hdev->quirks);

 		err = btintel_legacy_rom_setup(hdev, &ver);
 		break;
@@ -2956,7 +2953,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 	case 0x12:      /* ThP */
 	case 0x13:      /* HrP */
 	case 0x14:      /* CcP */
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
 		fallthrough;
 	case 0x0c:      /* WsP */
 		/* Apply the device specific HCI quirks
@@ -3048,9 +3044,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 		/* These variants don't seem to support LE Coded PHY */
 		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);

-		/* Set Valid LE States quirk */
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
 		/* Setup MSFT Extension support */
 		btintel_set_msft_opcode(hdev, ver.hw_variant);

@@ -3076,9 +3069,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 		 */
 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

-		/* Apply LE States quirk from solar onwards */
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
 		/* Setup MSFT Extension support */
 		btintel_set_msft_opcode(hdev,
 					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
@@ -1180,9 +1180,6 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
 	 */
 	set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

-	/* Apply LE States quirk from solar onwards */
-	set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
 	/* Setup MSFT Extension support */
 	btintel_set_msft_opcode(hdev,
 				INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
@@ -1148,9 +1148,6 @@ static int btmtksdio_setup(struct hci_dev *hdev)
 		}
 		}

-		/* Valid LE States quirk for MediaTek 7921 */
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
 		break;
 	case 0x7663:
 	case 0x7668:
@@ -1287,7 +1287,6 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
 	case CHIP_ID_8852C:
 	case CHIP_ID_8851B:
 	case CHIP_ID_8852BT:
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

 		/* RTL8852C needs to transmit mSBC data continuously without
@@ -3956,8 +3956,8 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
 		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

-	if (id->driver_info & BTUSB_VALID_LE_STATES)
-		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+	if (!(id->driver_info & BTUSB_VALID_LE_STATES))
+		set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);

 	if (id->driver_info & BTUSB_DIGIANSWER) {
 		data->cmdreq_type = USB_TYPE_VENDOR;
@@ -2474,8 +2474,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
 				&hdev->quirks);

-		if (data->capabilities & QCA_CAP_VALID_LE_STATES)
-			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+		if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
+			set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
 	}

 	return 0;
@@ -425,8 +425,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
 	if (opcode & 0x80)
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

-	set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
-
 	if (hci_register_dev(hdev) < 0) {
 		BT_ERR("Can't register HCI device");
 		hci_free_dev(hdev);
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
 static const char xillyname[] = "xillyusb";

 static unsigned int fifo_buf_order;
+static struct workqueue_struct *wakeup_wq;

 #define USB_VENDOR_ID_XILINX		0x03fd
 #define USB_VENDOR_ID_ALTERA		0x09fb
@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
 * errors if executed. The mechanism relies on that xdev->error is assigned
 * a non-zero value by report_io_error() prior to queueing wakeup_all(),
 * which prevents bulk_in_work() from calling process_bulk_in().
- *
- * The fact that wakeup_all() and bulk_in_work() are queued on the same
- * workqueue makes their concurrent execution very unlikely, however the
- * kernel's API doesn't seem to ensure this strictly.
 */

 static void wakeup_all(struct work_struct *work)
@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,

 	if (do_once) {
 		kref_get(&xdev->kref); /* xdev is used by work item */
-		queue_work(xdev->workq, &xdev->wakeup_workitem);
+		queue_work(wakeup_wq, &xdev->wakeup_workitem);
 	}
 }

@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {

 static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
 {
+	struct usb_device *udev = xdev->udev;
+
+	/* Verify that device has the two fundamental bulk in/out endpoints */
+	if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
+	    usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
+		return -ENODEV;
+
 	xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
 				      bulk_out_work, 1, 2);
 	if (!xdev->msg_ep)
@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
 			  __le16 *chandesc,
 			  int num_channels)
 {
-	struct xillyusb_channel *chan;
+	struct usb_device *udev = xdev->udev;
+	struct xillyusb_channel *chan, *new_channels;
 	int i;

 	chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
 		return -ENOMEM;

-	xdev->channels = chan;
+	new_channels = chan;

 	for (i = 0; i < num_channels; i++, chan++) {
 		unsigned int in_desc = le16_to_cpu(*chandesc++);
@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
 		 */

 		if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
+			if (usb_pipe_type_check(udev,
+						usb_sndbulkpipe(udev, i + 2))) {
+				dev_err(xdev->dev,
+					"Missing BULK OUT endpoint %d\n",
+					i + 2);
+				kfree(new_channels);
+				return -ENODEV;
+			}
+
 			chan->writable = 1;
 			chan->out_synchronous = !!(out_desc & 0x40);
 			chan->out_seekable = !!(out_desc & 0x20);
@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
 		}
 	}

+	xdev->channels = new_channels;
 	return 0;
 }

@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
 	 * just after responding with the IDT, there is no reason for any
 	 * work item to be running now. To be sure that xdev->channels
 	 * is updated on anything that might run in parallel, flush the
-	 * workqueue, which rarely does anything.
+	 * device's workqueue and the wakeup work item. This rarely
+	 * does anything.
 	 */
 	flush_workqueue(xdev->workq);
+	flush_work(&xdev->wakeup_workitem);

 	xdev->num_channels = num_channels;

@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
 {
 	int rc = 0;

+	wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!wakeup_wq)
+		return -ENOMEM;
+
 	if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
 		fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
 	else
@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)

 	rc = usb_register(&xillyusb_driver);

+	if (rc)
+		destroy_workqueue(wakeup_wq);
+
 	return rc;
 }

 static void __exit xillyusb_exit(void)
 {
 	usb_deregister(&xillyusb_driver);

+	destroy_workqueue(wakeup_wq);
 }

 module_init(xillyusb_init);
|
||||
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
|
||||
video_pll_clk_parent,
|
||||
&ccu_div_ops,
|
||||
0),
|
||||
CLK_IGNORE_UNUSED),
|
||||
},
|
||||
};
|
||||
|
||||
|
@@ -834,11 +834,13 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
 void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
 {
 	struct device *dport_dev = dport->dport_dev;
-	struct pci_host_bridge *host_bridge;

-	host_bridge = to_pci_host_bridge(dport_dev);
-	if (host_bridge->native_aer)
-		dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+	if (dport->rch) {
+		struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport_dev);
+
+		if (host_bridge->native_aer)
+			dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+	}

 	dport->reg_map.host = host;
 	cxl_dport_map_regs(dport);
@@ -39,6 +39,8 @@
#define MLXBF_GPIO_CAUSE_OR_EVTEN0 0x14
#define MLXBF_GPIO_CAUSE_OR_CLRCAUSE 0x18

#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)

struct mlxbf3_gpio_context {
struct gpio_chip gc;

@@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val &= ~BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);

writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);

gpiochip_disable_irq(gc, offset);
@@ -253,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
return 0;
}

static void mlxbf3_gpio_shutdown(struct platform_device *pdev)
{
struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev);

/* Disable and clear all interrupts */
writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
}

static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = {
{ "MLNXBF33", 0 },
{}
@@ -265,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
.acpi_match_table = mlxbf3_gpio_acpi_match,
},
.probe = mlxbf3_gpio_probe,
.shutdown = mlxbf3_gpio_shutdown,
};
module_platform_driver(mlxbf3_gpio_driver);
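The new .shutdown handler above follows the usual quiesce-on-shutdown pattern: mask every interrupt source first, then clear any causes already latched, so a subsequently kexec'd kernel does not inherit a screaming interrupt. A generic, self-contained sketch of the same idea, with hypothetical register offsets and names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define MY_INT_ENABLE   0x14	/* hypothetical register offsets */
#define MY_INT_CLRCAUSE 0x18

struct my_gpio_ctx {
	void __iomem *regs;
};

/* Illustrative sketch only, not the mlxbf3 code. */
static void my_gpio_shutdown(struct platform_device *pdev)
{
	struct my_gpio_ctx *ctx = platform_get_drvdata(pdev);

	writel(0, ctx->regs + MY_INT_ENABLE);			/* mask all */
	writel(GENMASK(31, 0), ctx->regs + MY_INT_CLRCAUSE);	/* clear latched */
}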
@@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
r = amdgpu_ring_parse_cs(ring, p, job, ib);
if (r)
return r;

if (ib->sa_bo)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
} else {
ib->ptr = (uint32_t *)kptr;
r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
@@ -685,16 +685,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,

switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_QUERY_STATE2:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
@@ -509,6 +509,16 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;

if (adev->enable_mes) {
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}
return 0;
}

if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;

@@ -551,6 +561,18 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
int i, r = 0;
int j;

if (adev->enable_mes) {
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}
}
return 0;
}

if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;

@@ -995,7 +1017,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
if (amdgpu_device_skip_hw_access(adev))
return 0;

if (adev->mes.ring.sched.ready)
if (adev->mes.ring[0].sched.ready)
return amdgpu_mes_rreg(adev, reg);

BUG_ON(!ring->funcs->emit_rreg);
@@ -1065,7 +1087,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
if (amdgpu_device_skip_hw_access(adev))
return;

if (adev->mes.ring.sched.ready) {
if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_wreg(adev, reg, v);
return;
}
@@ -589,7 +589,8 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
ring = adev->rings[i];
vmhub = ring->vm_hub;

if (ring == &adev->mes.ring ||
if (ring == &adev->mes.ring[0] ||
ring == &adev->mes.ring[1] ||
ring == &adev->umsch_mm.ring)
continue;

@@ -761,7 +762,7 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
unsigned long flags;
uint32_t seq;

if (adev->mes.ring.sched.ready) {
if (adev->mes.ring[0].sched.ready) {
amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
ref, mask);
return;
@@ -135,9 +135,11 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
idr_init(&adev->mes.queue_id_idr);
ida_init(&adev->mes.doorbell_ida);
spin_lock_init(&adev->mes.queue_id_lock);
spin_lock_init(&adev->mes.ring_lock);
mutex_init(&adev->mes.mutex_hidden);

for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
spin_lock_init(&adev->mes.ring_lock[i]);

adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
adev->mes.vmid_mask_gfxhub = 0xffffff00;
@@ -163,36 +165,38 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.sdma_hqd_mask[i] = 0xfc;
}

r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
if (r) {
dev_err(adev->dev,
"(%d) ring trail_fence_offs wb alloc failed\n", r);
goto error_ids;
}
adev->mes.sch_ctx_gpu_addr =
adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
adev->mes.sch_ctx_ptr =
(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
if (r) {
dev_err(adev->dev,
"(%d) ring trail_fence_offs wb alloc failed\n",
r);
goto error;
}
adev->mes.sch_ctx_gpu_addr[i] =
adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
adev->mes.sch_ctx_ptr[i] =
(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];

r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
if (r) {
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
dev_err(adev->dev,
"(%d) query_status_fence_offs wb alloc failed\n", r);
goto error_ids;
r = amdgpu_device_wb_get(adev,
&adev->mes.query_status_fence_offs[i]);
if (r) {
dev_err(adev->dev,
"(%d) query_status_fence_offs wb alloc failed\n",
r);
goto error;
}
adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
(adev->mes.query_status_fence_offs[i] * 4);
adev->mes.query_status_fence_ptr[i] =
(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
}
adev->mes.query_status_fence_gpu_addr =
adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
adev->mes.query_status_fence_ptr =
(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
if (r) {
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
dev_err(adev->dev,
"(%d) read_val_offs alloc failed\n", r);
goto error_ids;
goto error;
}
adev->mes.read_val_gpu_addr =
adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
@@ -212,10 +216,16 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
error_doorbell:
amdgpu_mes_doorbell_free(adev);
error:
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
if (adev->mes.sch_ctx_ptr[i])
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
if (adev->mes.query_status_fence_ptr[i])
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
if (adev->mes.read_val_ptr)
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
idr_destroy(&adev->mes.queue_id_idr);
@@ -226,13 +236,22 @@ error_ids:

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;

amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);

amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
if (adev->mes.sch_ctx_ptr[i])
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
if (adev->mes.query_status_fence_ptr[i])
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
if (adev->mes.read_val_ptr)
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

amdgpu_mes_doorbell_free(adev);

idr_destroy(&adev->mes.pasid_idr);
@@ -1499,7 +1518,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)

amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
sizeof(ucode_prefix));
if (adev->enable_uni_mes && pipe == AMDGPU_MES_SCHED_PIPE) {
if (adev->enable_uni_mes) {
snprintf(fw_name, sizeof(fw_name),
"amdgpu/%s_uni_mes.bin", ucode_prefix);
} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
@@ -82,8 +82,8 @@ struct amdgpu_mes {
uint64_t default_process_quantum;
uint64_t default_gang_quantum;

struct amdgpu_ring ring;
spinlock_t ring_lock;
struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];

const struct firmware *fw[AMDGPU_MAX_MES_PIPES];

@@ -112,12 +112,12 @@ struct amdgpu_mes {
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs;
uint64_t sch_ctx_gpu_addr;
uint64_t *sch_ctx_ptr;
uint32_t query_status_fence_offs;
uint64_t query_status_fence_gpu_addr;
uint64_t *query_status_fence_ptr;
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t read_val_offs;
uint64_t read_val_gpu_addr;
uint32_t *read_val_ptr;
@@ -212,6 +212,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
sched_hw_submission = 8;
else if (ring == &adev->sdma.instance[0].page)
sched_hw_submission = 256;
@@ -461,8 +461,11 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
uint8_t pad2[4];
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
uint8_t pad3[9];
};

#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -858,7 +858,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev)
adev->gfx.is_poweron = false;
}

adev->mes.ring.sched.ready = false;
adev->mes.ring[0].sched.ready = false;
}

bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
@@ -3546,33 +3546,9 @@ static int gfx_v12_0_hw_init(void *handle)
return r;
}

static int gfx_v12_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;

if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;

if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_gfx_rings))
return -ENOMEM;

for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);

if (adev->gfx.kiq[0].ring.sched.ready)
r = amdgpu_ring_test_helper(kiq_ring);

return r;
}

static int gfx_v12_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
uint32_t tmp;

amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3580,8 +3556,7 @@ static int gfx_v12_0_hw_fini(void *handle)

if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
r = gfx_v12_0_kiq_disable_kgq(adev);
if (r)
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -231,7 +231,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid, GET_INST(GC, 0));
@@ -299,7 +299,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
*/
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
@@ -538,11 +538,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,

amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -23,6 +23,7 @@

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
@@ -782,11 +783,15 @@ void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,

amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));

if (ring->funcs->parse_cs)
amdgpu_ring_write(ring, 0);
else
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));

amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -1084,6 +1089,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
.parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1248,3 +1254,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}

/**
* jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
*
* @parser: Command submission parser context
* @job: the job to parse
* @ib: the IB to parse
*
* Parse the command stream, return -EINVAL for invalid packet,
* 0 otherwise
*/
int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
uint32_t i, reg, res, cond, type;
struct amdgpu_device *adev = parser->adev;

for (i = 0; i < ib->length_dw ; i += 2) {
reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
res = CP_PACKETJ_GET_RES(ib->ptr[i]);
cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);

if (res) /* only support 0 at the moment */
return -EINVAL;

switch (type) {
case PACKETJ_TYPE0:
if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE3:
if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE6:
if (ib->ptr[i] == CP_PACKETJ_NOP)
continue;
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
default:
dev_err(adev->dev, "Unknown packet type %d !\n", type);
return -EINVAL;
}
}

return 0;
}
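For a sense of what the new parser accepts: it walks the IB two dwords at a time (a packet header, then one payload dword), and only TYPE0/TYPE3 writes to registers inside the 0x4000..0x41c2 window, plus TYPE6 NOPs, pass. A hypothetical host-side IB that would parse cleanly, with the PACKETJ bit layout copied locally so the example stands alone (all register offsets and values are illustrative):

#include <stdint.h>

/* Same bit layout as the kernel's PACKETJ() macro in soc15d.h. */
#define PACKETJ(reg, res, cond, type) \
	(((reg) & 0x3FFFF) | (((res) & 0x3F) << 18) | \
	 (((cond) & 0xF) << 24) | (((type) & 0xF) << 28))

static const uint32_t example_ib[] = {
	PACKETJ(0x4000, 0, 0, 0), 0x00000001,	/* TYPE0 write, reg in range */
	PACKETJ(0x41c2, 0, 0, 0), 0xdeadbeef,	/* last allowed register    */
	0x60000000,               0x00000000,	/* CP_PACKETJ_NOP (TYPE6)   */
};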
@@ -46,6 +46,9 @@

#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000

#define JPEG_REG_RANGE_START 0x4000
#define JPEG_REG_RANGE_END 0x41c2

extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;

void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
@@ -62,5 +65,7 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);

int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib);
#endif /* __JPEG_V4_0_3_H__ */
@@ -646,6 +646,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
.parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -162,13 +162,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
struct amdgpu_ring *ring = &mes->ring[0];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
u32 status_offset;
u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -191,11 +191,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;

spin_lock_irqsave(&mes->ring_lock, flags);
spin_lock_irqsave(&mes->ring_lock[0], flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;

seq = ++ring->fence_drv.sync_seq;
r = amdgpu_fence_wait_polling(ring,
seq - ring->fence_drv.num_fences_mask,
timeout);
if (r < 1)
goto error_undo;

api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -208,14 +215,13 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
mes_status_pkt.api_status.api_completion_fence_value =
++ring->fence_drv.sync_seq;
mes_status_pkt.api_status.api_completion_fence_value = seq;

amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);

amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
spin_unlock_irqrestore(&mes->ring_lock[0], flags);

op_str = mes_v11_0_get_op_string(x_pkt);
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
@@ -229,7 +235,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
dev_dbg(adev->dev, "MES msg=%d was emitted\n",
x_pkt->header.opcode);

r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {

if (misc_op_str)
@@ -252,8 +258,12 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;

error_undo:
dev_err(adev->dev, "MES ring buffer is full.\n");
amdgpu_ring_undo(ring);

error_unlock_free:
spin_unlock_irqrestore(&mes->ring_lock, flags);
spin_unlock_irqrestore(&mes->ring_lock[0], flags);

error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -512,9 +522,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;
mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
mes->query_status_fence_gpu_addr;
mes->query_status_fence_gpu_addr[0];

for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
@@ -1015,7 +1025,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}

kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);

return amdgpu_ring_test_helper(kiq_ring);
}
@@ -1029,7 +1039,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
ring = &adev->mes.ring[0];
else
BUG();

@@ -1071,7 +1081,7 @@ static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;

ring = &adev->mes.ring;
ring = &adev->mes.ring[0];

ring->funcs = &mes_v11_0_ring_funcs;

@@ -1124,7 +1134,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
ring = &adev->mes.ring[0];
else
BUG();

@@ -1200,9 +1210,6 @@ static int mes_v11_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;

amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);

@@ -1216,12 +1223,12 @@ static int mes_v11_0_sw_fini(void *handle)
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);

amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
&adev->mes.ring[0].mqd_gpu_addr,
&adev->mes.ring[0].mqd_ptr);

amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
amdgpu_ring_fini(&adev->mes.ring[0]);

if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1332,9 +1339,9 @@ failure:

static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
if (adev->mes.ring.sched.ready) {
mes_v11_0_kiq_dequeue(&adev->mes.ring);
adev->mes.ring.sched.ready = false;
if (adev->mes.ring[0].sched.ready) {
mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
adev->mes.ring[0].sched.ready = false;
}

if (amdgpu_sriov_vf(adev)) {
@@ -1352,7 +1359,7 @@ static int mes_v11_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (adev->mes.ring.sched.ready)
if (adev->mes.ring[0].sched.ready)
goto out;

if (!adev->enable_mes_kiq) {
@@ -1397,7 +1404,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
adev->mes.ring[0].sched.ready = true;

return 0;
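The restructuring above also changes how the v11 submission path guards the shared MES ring: the sequence number is reserved (seq = ++sync_seq) while the ring lock is held, and before the packet is emitted the code polls for fence seq - num_fences_mask, so at most num_fences_mask + 1 submissions can ever be in flight and the ring cannot be overrun; on timeout the ring allocation is undone (error_undo). A self-contained sketch of that throttling idea, with the window size and all names assumed for illustration:

#include <stdbool.h>
#include <stdint.h>

#define NUM_FENCES_MASK 7	/* assumed window: 8 packets in flight */

static uint32_t sync_seq;	/* last reserved sequence number */

/* Stub standing in for fence polling; the driver busy-waits on fence
 * memory instead.
 */
static bool wait_fence(uint32_t seq, long timeout)
{
	(void)seq; (void)timeout;
	return true;
}

/* Reserve a fence slot before writing a packet: waiting for the fence
 * that last used this slot guarantees the ring entry is free again.
 */
static int reserve_fence_slot(long timeout)
{
	uint32_t seq = ++sync_seq;

	if (!wait_fence(seq - NUM_FENCES_MASK, timeout))
		return -1;	/* ring full: undo the ring allocation */
	return (int)seq;
}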
@@ -142,19 +142,20 @@ static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
}

static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
int pipe, void *pkt, int size,
int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
signed long timeout = 3000000; /* 3000 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
struct amdgpu_ring *ring = &mes->ring[pipe];
spinlock_t *ring_lock = &mes->ring_lock[pipe];
struct MES_API_STATUS *api_status;
union MESAPI__MISC *x_pkt = pkt;
const char *op_str, *misc_op_str;
unsigned long flags;
u64 status_gpu_addr;
u32 status_offset;
u32 seq, status_offset;
u64 *status_ptr;
signed long r;
int ret;
@@ -177,11 +178,18 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
status_ptr = (u64 *)&adev->wb.wb[status_offset];
*status_ptr = 0;

spin_lock_irqsave(&mes->ring_lock, flags);
spin_lock_irqsave(ring_lock, flags);
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
if (r)
goto error_unlock_free;

seq = ++ring->fence_drv.sync_seq;
r = amdgpu_fence_wait_polling(ring,
seq - ring->fence_drv.num_fences_mask,
timeout);
if (r < 1)
goto error_undo;

api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = status_gpu_addr;
api_status->api_completion_fence_value = 1;
@@ -194,39 +202,39 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_status_pkt.api_status.api_completion_fence_addr =
ring->fence_drv.gpu_addr;
mes_status_pkt.api_status.api_completion_fence_value =
++ring->fence_drv.sync_seq;
mes_status_pkt.api_status.api_completion_fence_value = seq;

amdgpu_ring_write_multiple(ring, &mes_status_pkt,
sizeof(mes_status_pkt) / 4);

amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
spin_unlock_irqrestore(ring_lock, flags);

op_str = mes_v12_0_get_op_string(x_pkt);
misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);

if (misc_op_str)
dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
misc_op_str);
dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n",
pipe, op_str, misc_op_str);
else if (op_str)
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n",
pipe, op_str);
else
dev_dbg(adev->dev, "MES msg=%d was emitted\n",
x_pkt->header.opcode);
dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n",
pipe, x_pkt->header.opcode);

r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
if (r < 1 || !*status_ptr) {

if (misc_op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
op_str, misc_op_str);
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
pipe, op_str, misc_op_str);
else if (op_str)
dev_err(adev->dev, "MES failed to respond to msg=%s\n",
op_str);
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n",
pipe, op_str);
else
dev_err(adev->dev, "MES failed to respond to msg=%d\n",
x_pkt->header.opcode);
dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n",
pipe, x_pkt->header.opcode);

while (halt_if_hws_hang)
schedule();
@@ -238,8 +246,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
amdgpu_device_wb_free(adev, status_offset);
return 0;

error_undo:
dev_err(adev->dev, "MES ring buffer is full.\n");
amdgpu_ring_undo(ring);

error_unlock_free:
spin_unlock_irqrestore(&mes->ring_lock, flags);
spin_unlock_irqrestore(ring_lock, flags);

error_wb_free:
amdgpu_device_wb_free(adev, status_offset);
@@ -254,6 +266,8 @@ static int convert_to_mes_queue_type(int queue_type)
return MES_QUEUE_TYPE_COMPUTE;
else if (queue_type == AMDGPU_RING_TYPE_SDMA)
return MES_QUEUE_TYPE_SDMA;
else if (queue_type == AMDGPU_RING_TYPE_MES)
return MES_QUEUE_TYPE_SCHQ;
else
BUG();
return -1;
@@ -311,6 +325,7 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gds_size = input->queue_size;

return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -330,6 +345,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -338,6 +354,7 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
int pipe;

memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

@@ -354,7 +371,12 @@ static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
mes_add_queue_pkt.map_legacy_kq = 1;

return mes_v12_0_submit_pkt_and_poll_completion(mes,
if (mes->adev->enable_uni_mes)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;

return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
@@ -363,6 +385,7 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
int pipe;

memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

@@ -387,7 +410,12 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
convert_to_mes_queue_type(input->queue_type);
}

return mes_v12_0_submit_pkt_and_poll_completion(mes,
if (mes->adev->enable_uni_mes)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;

return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
@@ -404,7 +432,7 @@ static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
return 0;
}

static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;

@@ -414,7 +442,7 @@ static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes)
mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

return mes_v12_0_submit_pkt_and_poll_completion(mes,
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_status_pkt, sizeof(mes_status_pkt),
offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
@@ -423,6 +451,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
struct mes_misc_op_input *input)
{
union MESAPI__MISC misc_pkt;
int pipe;

memset(&misc_pkt, 0, sizeof(misc_pkt));

@@ -475,12 +504,17 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
return -EINVAL;
}

return mes_v12_0_submit_pkt_and_poll_completion(mes,
if (mes->adev->enable_uni_mes)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;

return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
}

static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
{
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt;

@@ -491,12 +525,12 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes)
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;

return mes_v12_0_submit_pkt_and_poll_completion(mes,
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}

static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
{
int i;
struct amdgpu_device *adev = mes->adev;
@@ -508,27 +542,33 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;
mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;

for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
mes->compute_hqd_mask[i];

for (i = 0; i < MAX_GFX_PIPES; i++)
mes_set_hw_res_pkt.gfx_hqd_mask[i] =
mes->gfx_hqd_mask[i];

for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] =
mes->sdma_hqd_mask[i];

for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
mes_set_hw_res_pkt.aggregated_doorbells[i] =
mes->aggregated_doorbells[i];
}

mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr =
mes->sch_ctx_gpu_addr[pipe];
mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
mes->query_status_fence_gpu_addr;

for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
mes->compute_hqd_mask[i];

for (i = 0; i < MAX_GFX_PIPES; i++)
mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];

for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];

for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
mes_set_hw_res_pkt.aggregated_doorbells[i] =
mes->aggregated_doorbells[i];
mes->query_status_fence_gpu_addr[pipe];

for (i = 0; i < 5; i++) {
mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@@ -556,7 +596,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
}

return mes_v12_0_submit_pkt_and_poll_completion(mes,
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
@@ -734,16 +774,11 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
if (enable) {
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
(!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
pipe == AMDGPU_MES_KIQ_PIPE)
continue;

soc21_grbm_select(adev, 3, pipe, 0, 0);

ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
@@ -757,8 +792,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)

/* unhalt MES and activate pipe0 */
data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
(!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

if (amdgpu_emu_mode)
@@ -774,8 +808,7 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
data = REG_SET_FIELD(data, CP_MES_CNTL,
MES_INVALIDATE_ICACHE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
(!adev->enable_uni_mes && adev->enable_mes_kiq) ? 1 : 0);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
}
@@ -790,10 +823,6 @@ static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)

mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if ((!adev->enable_mes_kiq || adev->enable_uni_mes) &&
pipe == AMDGPU_MES_KIQ_PIPE)
continue;

/* me=3, queue=0 */
soc21_grbm_select(adev, 3, pipe, 0, 0);

@@ -1085,7 +1114,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
return r;
}

kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);

r = amdgpu_ring_test_ring(kiq_ring);
if (r) {
@@ -1101,14 +1130,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
int r;

if (pipe == AMDGPU_MES_KIQ_PIPE)
if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
BUG();
ring = &adev->mes.ring[pipe];

if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) &&
(amdgpu_in_reset(adev) || adev->in_suspend)) {
*(ring->wptr_cpu_addr) = 0;
*(ring->rptr_cpu_addr) = 0;
@@ -1120,13 +1147,12 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return r;

if (pipe == AMDGPU_MES_SCHED_PIPE) {
if (adev->enable_uni_mes) {
mes_v12_0_queue_init_register(ring);
} else {
if (adev->enable_uni_mes)
r = amdgpu_mes_map_legacy_queue(adev, ring);
else
r = mes_v12_0_kiq_enable_queue(adev);
if (r)
return r;
}
if (r)
return r;
} else {
mes_v12_0_queue_init_register(ring);
}
@@ -1146,25 +1172,29 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
return 0;
}

static int mes_v12_0_ring_init(struct amdgpu_device *adev)
static int mes_v12_0_ring_init(struct amdgpu_device *adev, int pipe)
{
struct amdgpu_ring *ring;

ring = &adev->mes.ring;
ring = &adev->mes.ring[pipe];

ring->funcs = &mes_v12_0_ring_funcs;

ring->me = 3;
ring->pipe = 0;
ring->pipe = pipe;
ring->queue = 0;

ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe];
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);

if (pipe == AMDGPU_MES_SCHED_PIPE)
ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
else
ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;

return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
}
@@ -1178,7 +1208,7 @@ static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev)
ring = &adev->gfx.kiq[0].ring;

ring->me = 3;
ring->pipe = adev->enable_uni_mes ? 0 : 1;
ring->pipe = 1;
ring->queue = 0;

ring->adev = NULL;
@@ -1200,12 +1230,10 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
int r, mqd_size = sizeof(struct v12_compute_mqd);
struct amdgpu_ring *ring;

if (pipe == AMDGPU_MES_KIQ_PIPE)
if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
BUG();
ring = &adev->mes.ring[pipe];

if (ring->mqd_obj)
return 0;
@@ -1246,9 +1274,6 @@ static int mes_v12_0_sw_init(void *handle)
return r;

for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;

r = mes_v12_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1256,18 +1281,15 @@ static int mes_v12_0_sw_init(void *handle)
r = mes_v12_0_mqd_sw_init(adev, pipe);
if (r)
return r;
}

if (adev->enable_mes_kiq) {
r = mes_v12_0_kiq_ring_init(adev);
if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
r = mes_v12_0_kiq_ring_init(adev);
else
r = mes_v12_0_ring_init(adev, pipe);
if (r)
return r;
}

r = mes_v12_0_ring_init(adev);
if (r)
return r;

return 0;
}

@@ -1276,9 +1298,6 @@ static int mes_v12_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;

amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);

@@ -1286,18 +1305,21 @@ static int mes_v12_0_sw_fini(void *handle)
&adev->mes.eop_gpu_addr[pipe],
NULL);
amdgpu_ucode_release(&adev->mes.fw[pipe]);

if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj,
&adev->mes.ring[pipe].mqd_gpu_addr,
&adev->mes.ring[pipe].mqd_ptr);
amdgpu_ring_fini(&adev->mes.ring[pipe]);
}
}

amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);

amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);

amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
if (!adev->enable_uni_mes) {
amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
}

if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1341,7 +1363,7 @@ static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);

adev->mes.ring.sched.ready = false;
adev->mes.ring[0].sched.ready = false;
}

static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
@@ -1362,10 +1384,10 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;

mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);

if (adev->enable_uni_mes)
return mes_v12_0_hw_init(adev);
mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
else
mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);

if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {

@@ -1392,6 +1414,14 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
if (r)
goto failure;

if (adev->enable_uni_mes) {
r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;

mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
}

r = mes_v12_0_hw_init(adev);
if (r)
goto failure;
@@ -1405,9 +1435,15 @@ failure:

static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
{
if (adev->mes.ring.sched.ready) {
mes_v12_0_kiq_dequeue_sched(adev);
adev->mes.ring.sched.ready = false;
if (adev->mes.ring[0].sched.ready) {
if (adev->enable_uni_mes)
amdgpu_mes_unmap_legacy_queue(adev,
&adev->mes.ring[AMDGPU_MES_SCHED_PIPE],
RESET_QUEUES, 0, 0);
else
mes_v12_0_kiq_dequeue_sched(adev);

adev->mes.ring[0].sched.ready = false;
}

mes_v12_0_enable(adev, false);
@@ -1420,10 +1456,10 @@ static int mes_v12_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (adev->mes.ring.sched.ready)
if (adev->mes.ring[0].sched.ready)
goto out;

if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
if (!adev->enable_mes_kiq) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v12_0_load_microcode(adev,
AMDGPU_MES_SCHED_PIPE, true);
@@ -1443,23 +1479,23 @@ static int mes_v12_0_hw_init(void *handle)
mes_v12_0_enable(adev, true);
}

/* Enable the MES to handle doorbell ring on unmapped queue */
mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);

r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;

r = mes_v12_0_set_hw_resources(&adev->mes);
r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;

if (adev->enable_uni_mes)
mes_v12_0_set_hw_resources_1(&adev->mes);
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);

mes_v12_0_init_aggregated_doorbell(&adev->mes);

/* Enable the MES to handle doorbell ring on unmapped queue */
mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);

r = mes_v12_0_query_sched_status(&adev->mes);
r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE);
if (r) {
DRM_ERROR("MES is busy\n");
goto failure;
@@ -1472,7 +1508,7 @@ out:
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
adev->mes.ring[0].sched.ready = true;

return 0;

@@ -1515,17 +1551,7 @@ static int mes_v12_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;

if (adev->enable_uni_mes) {
r = amdgpu_mes_init_microcode(adev, AMDGPU_MES_SCHED_PIPE);
if (!r)
return 0;

adev->enable_uni_mes = false;
}

for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
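One pattern worth noting in the v12 changes above: every queue-management path now picks its submission pipe with the same conditional, routing packets to the KIQ pipe under unified MES and to the scheduler pipe otherwise. The repeated selection could be captured in a helper like the following hypothetical one; the patch deliberately open-codes it at each call site instead:

/* Hypothetical consolidation of the repeated pipe choice above. */
static inline int mes_v12_0_api_pipe(struct amdgpu_mes *mes)
{
	return mes->adev->enable_uni_mes ? AMDGPU_MES_KIQ_PIPE
					 : AMDGPU_MES_SCHED_PIPE;
}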
@@ -76,6 +76,12 @@
((cond & 0xF) << 24) | \
((type & 0xF) << 28))

#define CP_PACKETJ_NOP 0x60000000
#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF)
#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F)
#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF)
#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF)

/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
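As a quick sanity check of the field extraction above, decoding the CP_PACKETJ_NOP word 0x60000000 by hand gives REG = 0, RES = 0, COND = 0 and TYPE = 6, which is exactly the padding case the JPEG parser skips. The same arithmetic as a tiny host-side test:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hdr = 0x60000000;	/* CP_PACKETJ_NOP */

	assert((hdr & 0x3FFFF) == 0);		/* REG  */
	assert(((hdr >> 18) & 0x3F) == 0);	/* RES  */
	assert(((hdr >> 24) & 0xF) == 0);	/* COND */
	assert(((hdr >> 28) & 0xF) == 6);	/* TYPE == PACKETJ_TYPE6 */
	return 0;
}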
@@ -406,6 +406,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
@@ -424,6 +425,7 @@ static int soc24_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_HDP_SD |
AMD_CG_SUPPORT_MC_LS;

adev->pg_flags = AMD_PG_SUPPORT_VCN |
@@ -2893,6 +2893,9 @@ static int dm_suspend(void *handle)

hpd_rx_irq_work_suspend(dm);

if (adev->dm.dc->caps.ips_support)
dc_allow_idle_optimizations(adev->dm.dc, true);

dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
@ -804,12 +804,25 @@ struct dsc_mst_fairness_params {
|
||||
};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_FP)
|
||||
static int kbps_to_peak_pbn(int kbps)
|
||||
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
|
||||
{
|
||||
u8 link_coding_cap;
|
||||
uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
|
||||
|
||||
link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
|
||||
if (link_coding_cap == DP_128b_132b_ENCODING)
|
||||
fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
|
||||
|
||||
return fec_overhead_multiplier_x1000;
|
||||
}
|
||||
|
||||
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
|
||||
{
|
||||
u64 peak_kbps = kbps;
|
||||
|
||||
peak_kbps *= 1006;
|
||||
peak_kbps = div_u64(peak_kbps, 1000);
|
||||
peak_kbps *= fec_overhead_multiplier_x1000;
|
||||
peak_kbps = div_u64(peak_kbps, 1000 * 1000);
|
||||
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
|
||||
}
|
||||
|
||||
@ -910,11 +923,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
|
||||
int link_timeslots_used;
|
||||
int fair_pbn_alloc;
|
||||
int ret = 0;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i + k].dsc_enabled) {
|
||||
initial_slack[i] =
|
||||
kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
|
||||
kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
|
||||
bpp_increased[i] = false;
|
||||
remaining_to_increase += 1;
|
||||
} else {
|
||||
@ -1010,6 +1024,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
int next_index;
|
||||
int remaining_to_try = 0;
|
||||
int ret;
|
||||
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i + k].dsc_enabled
|
||||
@ -1039,7 +1054,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
if (next_index == -1)
|
||||
break;
|
||||
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
ret = drm_dp_atomic_find_time_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
@ -1052,8 +1067,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
|
||||
vars[next_index].dsc_enabled = false;
|
||||
vars[next_index].bpp_x16 = 0;
|
||||
} else {
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(
|
||||
params[next_index].bw_range.max_kbps);
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
|
||||
ret = drm_dp_atomic_find_time_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
@@ -1082,6 +1096,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	int count = 0;
 	int i, k, ret;
 	bool debugfs_overwrite = false;
+	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
 	memset(params, 0, sizeof(params));
 
@@ -1146,7 +1161,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	/* Try no compression */
 	for (i = 0; i < count; i++) {
 		vars[i + k].aconnector = params[i].aconnector;
-		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 		vars[i + k].dsc_enabled = false;
 		vars[i + k].bpp_x16 = 0;
 		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1165,7 +1180,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	/* Try max compression */
 	for (i = 0; i < count; i++) {
 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
 			vars[i + k].dsc_enabled = true;
 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1173,7 +1188,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 			if (ret < 0)
 				return ret;
 		} else {
-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 			vars[i + k].dsc_enabled = false;
 			vars[i + k].bpp_x16 = 0;
 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -46,6 +46,9 @@
 #define SYNAPTICS_CASCADED_HUB_ID 0x5A
 #define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
 
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
+
 enum mst_msg_ready_type {
 	NONE_MSG_RDY_EVENT = 0,
 	DOWN_REP_MSG_RDY_EVENT = 1,
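Note: the multipliers are x1000 fixed point. 1031 models the roughly 3% effective bandwidth cost of FEC on an 8b/10b link, while 128b/132b links fold FEC into the channel coding itself, so the multiplier stays neutral at 1000. A quick check of the scaling (hypothetical, standalone):

	/* 1,000,000 kbit/s of payload costs ~1,031,000 kbit/s of 8b/10b link budget */
	uint64_t link_kbps = (uint64_t)1000000 * PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B / 1000;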
@@ -3589,7 +3589,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 		    (int)hubp->curs_attr.width || pos_cpy.x
 			<= (int)hubp->curs_attr.width +
 			pipe_ctx->plane_state->src_rect.x) {
-			pos_cpy.x = temp_x + viewport_width;
+			pos_cpy.x = 2 * viewport_width - temp_x;
 		}
 	}
 } else {
@@ -3682,7 +3682,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 		    (int)hubp->curs_attr.width || pos_cpy.x
 			<= (int)hubp->curs_attr.width +
 			pipe_ctx->plane_state->src_rect.x) {
-			pos_cpy.x = 2 * viewport_width - temp_x;
+			pos_cpy.x = temp_x + viewport_width;
 		}
 	}
 } else {
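Note: these two hunks swap the x corrections between the two cursor branches. Assuming temp_x is the pre-adjusted position (the branch conditions are truncated in this view), `2 * viewport_width - temp_x` reflects the cursor about the right viewport edge, while `temp_x + viewport_width` shifts it one full viewport to the right; the fix applies each formula in the branch that actually needs it. A toy illustration with hypothetical values:

	int viewport_width = 1920, temp_x = 100;
	int reflected  = 2 * viewport_width - temp_x;	/* 3740 */
	int translated = temp_x + viewport_width;	/* 2020 */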
@@ -1778,6 +1778,9 @@ static bool dcn321_resource_construct(
 	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
 	dc->caps.color.mpc.ocsc = 1;
 
+	/* Use pipe context based otg sync logic */
+	dc->config.use_pipe_ctx_sync_logic = true;
+
 	dc->config.dc_mode_clk_limit_support = true;
 	dc->config.enable_windowed_mpo_odm = true;
 	/* read VBIOS LTTPR caps */
@@ -97,6 +97,7 @@ enum MES_QUEUE_TYPE {
 	MES_QUEUE_TYPE_SDMA,
 
 	MES_QUEUE_TYPE_MAX,
+	MES_QUEUE_TYPE_SCHQ = MES_QUEUE_TYPE_MAX,
 };
 
 struct MES_API_STATUS {
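Note: aliasing MES_QUEUE_TYPE_SCHQ to MES_QUEUE_TYPE_MAX introduces a new firmware-visible queue type without renumbering, and thus without breaking, any existing entry. The same C idiom in miniature:

	enum demo { FIRST, SECOND, DEMO_MAX, DEMO_ALIAS = DEMO_MAX };	/* DEMO_ALIAS == 2 */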
@@ -242,8 +243,12 @@ union MESAPI_SET_HW_RESOURCES {
 			uint32_t send_write_data : 1;
 			uint32_t os_tdr_timeout_override : 1;
 			uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+			uint32_t halt_on_misaligned_access : 1;
+			uint32_t use_add_queue_unmap_flag_addr : 1;
+			uint32_t enable_mes_sch_stb_log : 1;
+			uint32_t limit_single_process : 1;
 			uint32_t unmapped_doorbell_handling: 2;
-			uint32_t reserved : 15;
+			uint32_t reserved : 11;
 		};
 		uint32_t uint32_all;
 	};
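Note: the four new 1-bit flags are paid for by shrinking `reserved` from 15 to 11 bits, so the flag word stays exactly one 32-bit dword; since the new flags land ahead of unmapped_doorbell_handling, that field shifts up by four bits, presumably matching the updated MES firmware layout. The bit-count invariant as a compile-time check (hypothetical, outside the kernel):

	_Static_assert(1 + 1 + 1 + 2 + 15 == 1 + 1 + 1 + 4 + 2 + 11,
		       "tail of the flag word keeps the same bit count");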
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user