Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 04:18:39 +08:00)
regmap: Implement regmap_multi_reg_read()
Merge series from Guenter Roeck <linux@roeck-us.net>: regmap_multi_reg_read() is similar to regmap_bulk_read() but reads from an array of non-sequential registers. It is helpful if multiple non-sequential registers need to be read in a single operation which would otherwise have to be mutex-protected. The name of the new function was chosen to match the existing function regmap_multi_reg_write().
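
As a rough illustration of that use case, the sketch below fetches three scattered registers in one call instead of three separate regmap_read() calls. It assumes the new helper takes an array of register addresses plus a value buffer, mirroring regmap_bulk_read(); the demo_* names and register addresses are invented for the example:

/*
 * Hypothetical caller of the new helper. The three registers are
 * non-sequential, so regmap_bulk_read() cannot fetch them, yet they are
 * still read here as a single regmap operation.
 */
#include <linux/regmap.h>

#define DEMO_REG_TEMP   0x02
#define DEMO_REG_VOLT   0x10
#define DEMO_REG_FAULT  0x7f

static int demo_read_status(struct regmap *map, unsigned int vals[3])
{
        unsigned int regs[] = { DEMO_REG_TEMP, DEMO_REG_VOLT, DEMO_REG_FAULT };

        return regmap_multi_reg_read(map, regs, vals, ARRAY_SIZE(regs));
}

An equivalent sequence of individual regmap_read() calls would need an external mutex around the whole group to obtain a consistent snapshot, which is exactly the situation the series addresses.
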
commit 450a60ef60

.mailmap | 1
@@ -608,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
@@ -2192,12 +2192,6 @@
Format: 0 | 1
Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.

init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if
it was mlock'ed and not explicitly munlock'ed
afterwards.
Format: 0 | 1
Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON

init_pkru= [X86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can
@@ -59,8 +59,8 @@ properties:
- 3

dma-channels:
minItems: 1
maxItems: 64
minimum: 1
maximum: 64

clocks:
minItems: 1
@@ -77,7 +77,7 @@ required:
- clocks

allOf:
- $ref: i2c-controller.yaml
- $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:
@@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.

allOf:
- $ref: i2c-controller.yaml#
- $ref: /schemas/i2c/i2c-controller.yaml#

properties:
compatible:
@ -1,5 +1,6 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
<!-- Updated to inclusive terminology by Wolfram Sang -->
|
||||
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
@ -1120,7 +1121,7 @@
|
||||
<rect
|
||||
style="opacity:1;fill:#ffb9b9;fill-opacity:1;stroke:#f00000;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
id="rect4424-3-2-9-7"
|
||||
width="112.5"
|
||||
width="134.5"
|
||||
height="113.75008"
|
||||
x="112.5"
|
||||
y="471.11221"
|
||||
@ -1133,15 +1134,15 @@
|
||||
y="521.46259"
|
||||
id="text4349"><tspan
|
||||
sodipodi:role="line"
|
||||
x="167.5354"
|
||||
x="178.5354"
|
||||
y="521.46259"
|
||||
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
|
||||
id="tspan1273">I2C</tspan><tspan
|
||||
sodipodi:role="line"
|
||||
x="167.5354"
|
||||
x="178.5354"
|
||||
y="552.71259"
|
||||
style="font-size:25px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle"
|
||||
id="tspan1285">Master</tspan></text>
|
||||
id="tspan1285">Controller</tspan></text>
|
||||
<rect
|
||||
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
|
||||
id="rect4424-3-2-9-7-3-3-5-3"
|
||||
@ -1171,7 +1172,7 @@
|
||||
x="318.59131"
|
||||
y="552.08752"
|
||||
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
|
||||
id="tspan1287">Slave</tspan></text>
|
||||
id="tspan1287">Target</tspan></text>
|
||||
<path
|
||||
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968767;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 112.49995,677.36223 c 712.50005,0 712.50005,0 712.50005,0"
|
||||
@ -1233,7 +1234,7 @@
|
||||
x="468.59131"
|
||||
y="552.08746"
|
||||
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
|
||||
id="tspan1287-6">Slave</tspan></text>
|
||||
id="tspan1287-6">Target</tspan></text>
|
||||
<rect
|
||||
style="color:#000000;clip-rule:nonzero;display:inline;overflow:visible;visibility:visible;opacity:1;isolation:auto;mix-blend-mode:normal;color-interpolation:sRGB;color-interpolation-filters:linearRGB;solid-color:#000000;solid-opacity:1;vector-effect:none;fill:#b9ffb9;fill-opacity:1;fill-rule:nonzero;stroke:#006400;stroke-width:2.8125;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;enable-background:accumulate"
|
||||
id="rect4424-3-2-9-7-3-3-5-3-1"
|
||||
@ -1258,7 +1259,7 @@
|
||||
x="618.59131"
|
||||
y="552.08746"
|
||||
style="font-size:25.00000191px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;stroke-width:1px"
|
||||
id="tspan1287-9">Slave</tspan></text>
|
||||
id="tspan1287-9">Target</tspan></text>
|
||||
<path
|
||||
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.99968743;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#DotM)"
|
||||
d="m 150,583.61221 v 93.75"
|
||||
|
Before: 55 KiB, After: 55 KiB
@@ -3,29 +3,27 @@ Introduction to I2C and SMBus
=============================

I²C (pronounce: I squared C and written I2C in the kernel documentation) is
a protocol developed by Philips. It is a slow two-wire protocol (variable
speed, up to 400 kHz), with a high speed extension (3.4 MHz). It provides
a protocol developed by Philips. It is a two-wire protocol with variable
speed (typically up to 400 kHz, high speed modes up to 5 MHz). It provides
an inexpensive bus for connecting many types of devices with infrequent or
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
low bandwidth communications needs. I2C is widely used with embedded
systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.

The latest official I2C specification is the `"I2C-bus specification and user
manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
published by NXP Semiconductors. However, you need to log-in to the site to
access the PDF. An older version of the specification (revision 6) is archived
`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
The latest official I2C specification is the `"I²C-bus specification and user
manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
published by NXP Semiconductors, version 7 as of this writing.

SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
a subset of I2C protocols and signaling. Many I2C devices will work on an
SMBus, but some SMBus protocols add semantics beyond what is required to
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
achieve I2C branding. Modern PC mainboards rely on SMBus. The most common
devices connected through SMBus are RAM modules configured using I2C EEPROMs,
and hardware monitoring chips.

Because the SMBus is mostly a subset of the generalized I2C bus, we can
use its protocols on many I2C systems. However, there are systems that don't
use its protocols on many I2C systems. However, there are systems that don't
meet both SMBus and I2C electrical constraints; and others which can't
implement all the common SMBus protocol semantics or messages.
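
Because most SMBus transactions map onto plain I2C transfers, kernel drivers normally go through the i2c_smbus_*() helpers and let the I2C core emulate the protocol when the adapter has no native SMBus engine. A minimal, hypothetical sketch (the register offset and the demo_* name are invented):

#include <linux/i2c.h>

/* Read one byte from an invented register using the SMBus helpers. */
static int demo_read_temp(struct i2c_client *client)
{
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_READ_BYTE_DATA))
                return -EOPNOTSUPP;

        /*
         * Succeeds on native SMBus controllers and, via emulation, on
         * adapters that only implement raw I2C transfers.
         */
        return i2c_smbus_read_byte_data(client, 0x00);
}

The functionality check, rather than the bus type, decides whether the call can work, since the core advertises emulated SMBus commands for adapters that only provide raw I2C transfers.
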
@@ -33,29 +31,52 @@ implement all the common SMBus protocol semantics or messages.
Terminology
===========

Using the terminology from the official documentation, the I2C bus connects
one or more *master* chips and one or more *slave* chips.
The I2C bus connects one or more controller chips and one or more target chips.

.. kernel-figure:: i2c_bus.svg
:alt: Simple I2C bus with one master and 3 slaves
:alt: Simple I2C bus with one controller and 3 targets

Simple I2C bus

A **master** chip is a node that starts communications with slaves. In the
Linux kernel implementation it is called an **adapter** or bus. Adapter
drivers are in the ``drivers/i2c/busses/`` subdirectory.
A **controller** chip is a node that starts communications with targets. In the
Linux kernel implementation it is also called an "adapter" or "bus". Controller
drivers are usually in the ``drivers/i2c/busses/`` subdirectory.

An **algorithm** contains general code that can be used to implement a
whole class of I2C adapters. Each specific adapter driver either depends on
an algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes
its own implementation.
An **algorithm** contains general code that can be used to implement a whole
class of I2C controllers. Each specific controller driver either depends on an
algorithm driver in the ``drivers/i2c/algos/`` subdirectory, or includes its
own implementation.

A **slave** chip is a node that responds to communications when addressed
by the master. In Linux it is called a **client**. Client drivers are kept
in a directory specific to the feature they provide, for example
``drivers/media/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
A **target** chip is a node that responds to communications when addressed by a
controller. In the Linux kernel implementation it is also called a "client".
While targets are usually separate external chips, Linux can also act as a
target (needs hardware support) and respond to another controller on the bus.
This is then called a **local target**. In contrast, an external chip is called
a **remote target**.

Target drivers are kept in a directory specific to the feature they provide,
for example ``drivers/gpio/`` for GPIO expanders and ``drivers/media/i2c/`` for
video-related chips.

For the example configuration in figure, you will need a driver for your
I2C adapter, and drivers for your I2C devices (usually one driver for each
device).
For the example configuration in the figure above, you will need one driver for
the I2C controller, and drivers for your I2C targets. Usually one driver for
each target.

Synonyms
--------

As mentioned above, the Linux I2C implementation historically uses the terms
"adapter" for controller and "client" for target. A number of data structures
have these synonyms in their name. So, when discussing implementation details,
you should be aware of these terms as well. The official wording is preferred,
though.

Outdated terminology
--------------------

In earlier I2C specifications, controller was named "master" and target was
named "slave". These terms have been obsoleted with v7 of the specification and
their use is also discouraged by the Linux Kernel Code of Conduct. You may
still find them in references to documentation which has not been updated. The
general attitude, however, is to use the inclusive terms: controller and
target. Work to replace the old terminology in the Linux Kernel is on-going.
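
To tie the terminology to code, a target ("client") driver is typically a small module like the hypothetical skeleton below, registered with the I2C core and kept in the directory of the feature it implements; every name in it is made up for illustration:

#include <linux/i2c.h>
#include <linux/module.h>

/* Called when the core matches this driver to a target device on some bus. */
static int demo_probe(struct i2c_client *client)
{
        dev_info(&client->dev, "demo target at address 0x%02x\n", client->addr);
        return 0;
}

static const struct i2c_device_id demo_id[] = {
        { "demo-target" },
        { }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
        .driver         = { .name = "demo-target" },
        .probe          = demo_probe,
        .id_table       = demo_id,
};
module_i2c_driver(demo_driver);

MODULE_DESCRIPTION("Hypothetical I2C target driver skeleton");
MODULE_LICENSE("GPL");

The matching controller ("adapter") driver lives under drivers/i2c/busses/ and is what actually moves bytes on the wire; the client above only issues transfers against whichever adapter its device sits on.
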
@@ -123,8 +123,6 @@ operations:
doc: dump pending nfsd rpc
attribute-set: rpc-status
dump:
pre: nfsd-nl-rpc-status-get-start
post: nfsd-nl-rpc-status-get-done
reply:
attributes:
- xid
@@ -32,6 +32,7 @@ Security-related interfaces
seccomp_filter
landlock
lsm
mfd_noexec
spec_ctrl
tee
Documentation/userspace-api/mfd_noexec.rst | 86 (new file)
@@ -0,0 +1,86 @@
.. SPDX-License-Identifier: GPL-2.0

==================================
Introduction of non-executable mfd
==================================
:Author:
Daniel Verkamp <dverkamp@chromium.org>
Jeff Xu <jeffxu@chromium.org>

:Contributor:
Aleksa Sarai <cyphar@cyphar.com>

Since Linux introduced the memfd feature, memfds have always had their
execute bit set, and the memfd_create() syscall doesn't allow setting
it differently.

However, in a secure-by-default system, such as ChromeOS, (where all
executables should come from the rootfs, which is protected by verified
boot), this executable nature of memfd opens a door for NoExec bypass
and enables “confused deputy attack”. E.g, in VRP bug [1]: cros_vm
process created a memfd to share the content with an external process,
however the memfd is overwritten and used for executing arbitrary code
and root escalation. [2] lists more VRP of this kind.

On the other hand, executable memfd has its legit use: runc uses memfd’s
seal and executable feature to copy the contents of the binary then
execute them. For such a system, we need a solution to differentiate runc's
use of executable memfds and an attacker's [3].

To address those above:
- Let memfd_create() set X bit at creation time.
- Let memfd be sealed for modifying X bit when NX is set.
- Add a new pid namespace sysctl: vm.memfd_noexec to help applications in
migrating and enforcing non-executable MFD.

User API
========
``int memfd_create(const char *name, unsigned int flags)``

``MFD_NOEXEC_SEAL``
When MFD_NOEXEC_SEAL bit is set in the ``flags``, memfd is created
with NX. F_SEAL_EXEC is set and the memfd can't be modified to
add X later. MFD_ALLOW_SEALING is also implied.
This is the most common case for the application to use memfd.

``MFD_EXEC``
When MFD_EXEC bit is set in the ``flags``, memfd is created with X.

Note:
``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. In case that
an app doesn't want sealing, it can add F_SEAL_SEAL after creation.
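
As a userspace illustration of the flags above, the following sketch creates a sealed non-executable memfd and shows the seal taking effect. It relies only on the behaviour described in this document, carries fallback defines for older libc headers (values copied from the uapi headers), and trims error handling:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>

#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U         /* include/uapi/linux/memfd.h */
#endif
#ifndef F_SEAL_EXEC
#define F_SEAL_EXEC 0x0020              /* include/uapi/linux/fcntl.h */
#endif

int main(void)
{
        int fd = memfd_create("demo", MFD_CLOEXEC | MFD_NOEXEC_SEAL);

        if (fd < 0) {
                perror("memfd_create");
                return 1;
        }

        /* Created with NX; F_SEAL_EXEC is already applied. */
        int seals = fcntl(fd, F_GET_SEALS);
        printf("F_SEAL_EXEC %s\n", (seals & F_SEAL_EXEC) ? "set" : "not set");

        /* Making the memfd executable again is expected to be refused. */
        if (fchmod(fd, 0755) < 0)
                perror("fchmod to 0755 (expected to fail)");

        return 0;
}

On kernels that predate this feature, memfd_create() fails with EINVAL for the unknown flag, which doubles as a simple runtime probe for MFD_NOEXEC_SEAL support.
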
Sysctl:
========
``pid namespaced sysctl vm.memfd_noexec``

The new pid namespaced sysctl vm.memfd_noexec has 3 values:

- 0: MEMFD_NOEXEC_SCOPE_EXEC
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_EXEC was set.

- 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
MFD_NOEXEC_SEAL was set.

- 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
memfd_create() without MFD_NOEXEC_SEAL will be rejected.

The sysctl allows finer control of memfd_create for old software that
doesn't set the executable bit; for example, a container with
vm.memfd_noexec=1 means the old software will create non-executable memfd
by default while new software can create executable memfd by setting
MFD_EXEC.

The value of vm.memfd_noexec is passed to child namespace at creation
time. In addition, the setting is hierarchical, i.e. during memfd_create,
we will search from current ns to root ns and use the most restrictive
setting.
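
For completeness, the effective value can be read back through procfs using the usual sysctl-to-path mapping; a minimal check, again with error handling trimmed:

#include <stdio.h>

/* Print the memfd_noexec scope seen by the current pid namespace. */
int main(void)
{
        FILE *f = fopen("/proc/sys/vm/memfd_noexec", "r");
        int scope = -1;

        if (!f || fscanf(f, "%d", &scope) != 1)
                perror("read vm.memfd_noexec");
        else
                printf("vm.memfd_noexec = %d\n", scope);

        if (f)
                fclose(f);
        return 0;
}
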
[1] https://crbug.com/1305267

[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1

[3] https://lwn.net/Articles/781013/
@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
|
||||
space code performs the same algorithm of reading the TSC and
|
||||
applying the scale and offset to get the constant 10 MHz clock.
|
||||
|
||||
Linux clockevents are based on Hyper-V synthetic timer 0. While
|
||||
Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
|
||||
timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
|
||||
/proc/interrupts. Clockevents based on the virtualized PIT and
|
||||
local APIC timer also work, but the Hyper-V synthetic timer is
|
||||
preferred.
|
||||
Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
|
||||
While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
|
||||
timer 0. In older versions of Hyper-V, an interrupt from stimer0
|
||||
results in a VMBus control message that is demultiplexed by
|
||||
vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
|
||||
documentation. In newer versions of Hyper-V, stimer0 interrupts can
|
||||
be mapped to an architectural interrupt, which is referred to as
|
||||
"Direct Mode". Linux prefers to use Direct Mode when available. Since
|
||||
x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
|
||||
allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
|
||||
and explicitly codes it to call the stimer0 interrupt handler. Hence
|
||||
interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
|
||||
rather than being associated with a Linux IRQ. Clockevents based on the
|
||||
virtualized PIT and local APIC timer also work, but Hyper-V stimer0
|
||||
is preferred.
|
||||
|
||||
The driver for the Hyper-V synthetic system clock and timers is
|
||||
drivers/clocksource/hyperv_timer.c.
|
||||
|
@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
|
||||
arm64, these synthetic registers must be accessed using explicit
|
||||
hypercalls.
|
||||
|
||||
* VMbus: VMbus is a higher-level software construct that is built on
|
||||
* VMBus: VMBus is a higher-level software construct that is built on
|
||||
the other 3 mechanisms. It is a message passing interface between
|
||||
the Hyper-V host and the Linux guest. It uses memory that is shared
|
||||
between Hyper-V and the guest, along with various signaling
|
||||
@ -54,8 +54,8 @@ x86/x64 architecture only.
|
||||
|
||||
.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs
|
||||
|
||||
VMbus is not documented. This documentation provides a high-level
|
||||
overview of VMbus and how it works, but the details can be discerned
|
||||
VMBus is not documented. This documentation provides a high-level
|
||||
overview of VMBus and how it works, but the details can be discerned
|
||||
only from the code.
|
||||
|
||||
Sharing Memory
|
||||
@ -74,7 +74,7 @@ follows:
|
||||
physical address space. How Hyper-V is told about the GPA or list
|
||||
of GPAs varies. In some cases, a single GPA is written to a
|
||||
synthetic register. In other cases, a GPA or list of GPAs is sent
|
||||
in a VMbus message.
|
||||
in a VMBus message.
|
||||
|
||||
* Hyper-V translates the GPAs into "real" physical memory addresses,
|
||||
and creates a virtual mapping that it can use to access the memory.
|
||||
@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
|
||||
any hot-add CPUs.
|
||||
|
||||
A Linux guest CPU may be taken offline using the normal Linux
|
||||
mechanisms, provided no VMbus channel interrupts are assigned to
|
||||
the CPU. See the section on VMbus Interrupts for more details
|
||||
on how VMbus channel interrupts can be re-assigned to permit
|
||||
mechanisms, provided no VMBus channel interrupts are assigned to
|
||||
the CPU. See the section on VMBus Interrupts for more details
|
||||
on how VMBus channel interrupts can be re-assigned to permit
|
||||
taking a CPU offline.
|
||||
|
||||
32-bit and 64-bit
|
||||
@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
|
||||
via flags in synthetic MSRs that Hyper-V provides to the guest,
|
||||
and the guest code tests these flags.
|
||||
|
||||
VMbus has its own protocol version that is negotiated during the
|
||||
initial VMbus connection from the guest to Hyper-V. This version
|
||||
VMBus has its own protocol version that is negotiated during the
|
||||
initial VMBus connection from the guest to Hyper-V. This version
|
||||
number is also output to dmesg during boot. This version number
|
||||
is checked in a few places in the code to determine if specific
|
||||
functionality is present.
|
||||
|
||||
Furthermore, each synthetic device on VMbus also has a protocol
|
||||
version that is separate from the VMbus protocol version. Device
|
||||
Furthermore, each synthetic device on VMBus also has a protocol
|
||||
version that is separate from the VMBus protocol version. Device
|
||||
drivers for these synthetic devices typically negotiate the device
|
||||
protocol version, and may test that protocol version to determine
|
||||
if specific device functionality is present.
|
||||
|
@ -1,8 +1,8 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
VMbus
|
||||
VMBus
|
||||
=====
|
||||
VMbus is a software construct provided by Hyper-V to guest VMs. It
|
||||
VMBus is a software construct provided by Hyper-V to guest VMs. It
|
||||
consists of a control path and common facilities used by synthetic
|
||||
devices that Hyper-V presents to guest VMs. The control path is
|
||||
used to offer synthetic devices to the guest VM and, in some cases,
|
||||
@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
|
||||
signaling primitives to allow Hyper-V and the guest to interrupt
|
||||
each other.
|
||||
|
||||
VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
|
||||
entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
|
||||
establishes the VMbus control path with the Hyper-V host, then
|
||||
VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
|
||||
entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
|
||||
establishes the VMBus control path with the Hyper-V host, then
|
||||
registers itself as a Linux bus driver. It implements the standard
|
||||
bus functions for adding and removing devices to/from the bus.
|
||||
|
||||
@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
|
||||
the synthetic SCSI controller is "storvsc". These drivers contain
|
||||
functions with names like "storvsc_connect_to_vsp".
|
||||
|
||||
VMbus channels
|
||||
VMBus channels
|
||||
--------------
|
||||
An instance of a synthetic device uses VMbus channels to communicate
|
||||
An instance of a synthetic device uses VMBus channels to communicate
|
||||
between the VSP and the VSC. Channels are bi-directional and used
|
||||
for passing messages. Most synthetic devices use a single channel,
|
||||
but the synthetic SCSI controller and synthetic NIC may use multiple
|
||||
@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
|
||||
actual ring. The size of the ring is determined by the VSC in the
|
||||
guest and is specific to each synthetic device. The list of GPAs
|
||||
making up the ring is communicated to the Hyper-V host over the
|
||||
VMbus control path as a GPA Descriptor List (GPADL). See function
|
||||
VMBus control path as a GPA Descriptor List (GPADL). See function
|
||||
vmbus_establish_gpadl().
|
||||
|
||||
Each ring buffer is mapped into contiguous Linux kernel virtual
|
||||
@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
|
||||
approximately 1280 Mbytes. For versions prior to Windows Server
|
||||
2019, the limit is approximately 384 Mbytes.
|
||||
|
||||
VMbus messages
|
||||
--------------
|
||||
All VMbus messages have a standard header that includes the message
|
||||
length, the offset of the message payload, some flags, and a
|
||||
VMBus channel messages
|
||||
----------------------
|
||||
All messages sent in a VMBus channel have a standard header that includes
|
||||
the message length, the offset of the message payload, some flags, and a
|
||||
transactionID. The portion of the message after the header is
|
||||
unique to each VSP/VSC pair.
|
||||
|
||||
@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
|
||||
buffer. For example, the storvsc driver uses this approach to
|
||||
specify the data buffers to/from which disk I/O is done.
|
||||
|
||||
Three functions exist to send VMbus messages:
|
||||
Three functions exist to send VMBus channel messages:
|
||||
|
||||
1. vmbus_sendpacket(): Control-only messages and messages with
|
||||
embedded data -- no GPAs
|
||||
@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
|
||||
and valid messages, and Linux drivers for synthetic devices did not
|
||||
fully validate messages. With the introduction of processor
|
||||
technologies that fully encrypt guest memory and that allow the
|
||||
guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
|
||||
guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
|
||||
the Hyper-V host is no longer a valid assumption. The drivers for
|
||||
VMbus synthetic devices are being updated to fully validate any
|
||||
VMBus synthetic devices are being updated to fully validate any
|
||||
values read from memory that is shared with Hyper-V, which includes
|
||||
messages from VMbus devices. To facilitate such validation,
|
||||
messages from VMBus devices. To facilitate such validation,
|
||||
messages read by the guest from the "in" ring buffer are copied to a
|
||||
temporary buffer that is not shared with Hyper-V. Validation is
|
||||
performed in this temporary buffer without the risk of Hyper-V
|
||||
maliciously modifying the message after it is validated but before
|
||||
it is used.
|
||||
|
||||
VMbus interrupts
|
||||
Synthetic Interrupt Controller (synic)
|
||||
--------------------------------------
|
||||
Hyper-V provides each guest CPU with a synthetic interrupt controller
|
||||
that is used by VMBus for host-guest communication. While each synic
|
||||
defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
|
||||
(VMBUS_MESSAGE_SINT). All interrupts related to communication between
|
||||
the Hyper-V host and a guest CPU use that SINT.
|
||||
|
||||
The SINT is mapped to a single per-CPU architectural interrupt (i.e,
|
||||
an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
|
||||
each CPU in the guest has a synic and may receive VMBus interrupts,
|
||||
they are best modeled in Linux as per-CPU interrupts. This model works
|
||||
well on arm64 where a single per-CPU Linux IRQ is allocated for
|
||||
VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
|
||||
"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
|
||||
interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
|
||||
across all CPUs and explicitly coded to call vmbus_isr(). In this case,
|
||||
there's no Linux IRQ, and the interrupts are visible in aggregate in
|
||||
/proc/interrupts on the "HYP" line.
|
||||
|
||||
The synic provides the means to demultiplex the architectural interrupt into
|
||||
one or more logical interrupts and route the logical interrupt to the proper
|
||||
VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
|
||||
related functions that access synic data structures.
|
||||
|
||||
The synic is not modeled in Linux as an irq chip or irq domain,
|
||||
and the demultiplexed logical interrupts are not Linux IRQs. As such,
|
||||
they don't appear in /proc/interrupts or /proc/irq. The CPU
|
||||
affinity for one of these logical interrupts is controlled via an
|
||||
entry under /sys/bus/vmbus as described below.
|
||||
|
||||
VMBus interrupts
|
||||
----------------
|
||||
VMbus provides a mechanism for the guest to interrupt the host when
|
||||
VMBus provides a mechanism for the guest to interrupt the host when
|
||||
the guest has queued new messages in a ring buffer. The host
|
||||
expects that the guest will send an interrupt only when an "out"
|
||||
ring buffer transitions from empty to non-empty. If the guest sends
|
||||
@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
|
||||
interrupts, the host may throttle that guest by suspending its
|
||||
execution for a few seconds to prevent a denial-of-service attack.
|
||||
|
||||
Similarly, the host will interrupt the guest when it sends a new
|
||||
message on the VMbus control path, or when a VMbus channel "in" ring
|
||||
buffer transitions from empty to non-empty. Each CPU in the guest
|
||||
may receive VMbus interrupts, so they are best modeled as per-CPU
|
||||
interrupts in Linux. This model works well on arm64 where a single
|
||||
per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
|
||||
per-CPU IRQs, an x86 interrupt vector is statically allocated (see
|
||||
HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
|
||||
call the VMbus interrupt service routine. These interrupts are
|
||||
visible in /proc/interrupts on the "HYP" line.
|
||||
Similarly, the host will interrupt the guest via the synic when
|
||||
it sends a new message on the VMBus control path, or when a VMBus
|
||||
channel "in" ring buffer transitions from empty to non-empty due to
|
||||
the host inserting a new VMBus channel message. The control message stream
|
||||
and each VMBus channel "in" ring buffer are separate logical interrupts
|
||||
that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
|
||||
for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
|
||||
bitmap to determine which channels have pending interrupts on this CPU.
|
||||
If multiple channels have pending interrupts for this CPU, they are
|
||||
processed sequentially. When all channel interrupts have been processed,
|
||||
vmbus_isr() checks for and processes any messages received on the VMBus
|
||||
control path.
|
||||
|
||||
The guest CPU that a VMbus channel will interrupt is selected by the
|
||||
The guest CPU that a VMBus channel will interrupt is selected by the
|
||||
guest when the channel is created, and the host is informed of that
|
||||
selection. VMbus devices are broadly grouped into two categories:
|
||||
selection. VMBus devices are broadly grouped into two categories:
|
||||
|
||||
1. "Slow" devices that need only one VMbus channel. The devices
|
||||
1. "Slow" devices that need only one VMBus channel. The devices
|
||||
(such as keyboard, mouse, heartbeat, and timesync) generate
|
||||
relatively few interrupts. Their VMbus channels are all
|
||||
relatively few interrupts. Their VMBus channels are all
|
||||
assigned to interrupt the VMBUS_CONNECT_CPU, which is always
|
||||
CPU 0.
|
||||
|
||||
2. "High speed" devices that may use multiple VMbus channels for
|
||||
2. "High speed" devices that may use multiple VMBus channels for
|
||||
higher parallelism and performance. These devices include the
|
||||
synthetic SCSI controller and synthetic NIC. Their VMbus
|
||||
synthetic SCSI controller and synthetic NIC. Their VMBus
|
||||
channels interrupts are assigned to CPUs that are spread out
|
||||
among the available CPUs in the VM so that interrupts on
|
||||
multiple channels can be processed in parallel.
|
||||
|
||||
The assignment of VMbus channel interrupts to CPUs is done in the
|
||||
The assignment of VMBus channel interrupts to CPUs is done in the
|
||||
function init_vp_index(). This assignment is done outside of the
|
||||
normal Linux interrupt affinity mechanism, so the interrupts are
|
||||
neither "unmanaged" nor "managed" interrupts.
|
||||
|
||||
The CPU that a VMbus channel will interrupt can be seen in
|
||||
The CPU that a VMBus channel will interrupt can be seen in
|
||||
/sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu.
|
||||
When running on later versions of Hyper-V, the CPU can be changed
|
||||
by writing a new value to this sysfs entry. Because the interrupt
|
||||
assignment is done outside of the normal Linux affinity mechanism,
|
||||
there are no entries in /proc/irq corresponding to individual
|
||||
VMbus channel interrupts.
|
||||
by writing a new value to this sysfs entry. Because VMBus channel
|
||||
interrupts are not Linux IRQs, there are no entries in /proc/interrupts
|
||||
or /proc/irq corresponding to individual VMBus channel interrupts.
|
||||
|
||||
An online CPU in a Linux guest may not be taken offline if it has
|
||||
VMbus channel interrupts assigned to it. Any such channel
|
||||
VMBus channel interrupts assigned to it. Any such channel
|
||||
interrupts must first be manually reassigned to another CPU as
|
||||
described above. When no channel interrupts are assigned to the
|
||||
CPU, it can be taken offline.
|
||||
|
||||
When a guest CPU receives a VMbus interrupt from the host, the
|
||||
function vmbus_isr() handles the interrupt. It first checks for
|
||||
channel interrupts by calling vmbus_chan_sched(), which looks at a
|
||||
bitmap setup by the host to determine which channels have pending
|
||||
interrupts on this CPU. If multiple channels have pending
|
||||
interrupts for this CPU, they are processed sequentially. When all
|
||||
channel interrupts have been processed, vmbus_isr() checks for and
|
||||
processes any message received on the VMbus control path.
|
||||
|
||||
The VMbus channel interrupt handling code is designed to work
|
||||
The VMBus channel interrupt handling code is designed to work
|
||||
correctly even if an interrupt is received on a CPU other than the
|
||||
CPU assigned to the channel. Specifically, the code does not use
|
||||
CPU-based exclusion for correctness. In normal operation, Hyper-V
|
||||
@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
|
||||
even if there is a time lag before Hyper-V starts interrupting the
|
||||
new CPU. See comments in target_cpu_store().
|
||||
|
||||
VMbus device creation/deletion
|
||||
VMBus device creation/deletion
|
||||
------------------------------
|
||||
Hyper-V and the Linux guest have a separate message-passing path
|
||||
that is used for synthetic device creation and deletion. This
|
||||
path does not use a VMbus channel. See vmbus_post_msg() and
|
||||
path does not use a VMBus channel. See vmbus_post_msg() and
|
||||
vmbus_on_msg_dpc().
|
||||
|
||||
The first step is for the guest to connect to the generic
|
||||
Hyper-V VMbus mechanism. As part of establishing this connection,
|
||||
the guest and Hyper-V agree on a VMbus protocol version they will
|
||||
Hyper-V VMBus mechanism. As part of establishing this connection,
|
||||
the guest and Hyper-V agree on a VMBus protocol version they will
|
||||
use. This negotiation allows newer Linux kernels to run on older
|
||||
Hyper-V versions, and vice versa.
|
||||
|
||||
The guest then tells Hyper-V to "send offers". Hyper-V sends an
|
||||
offer message to the guest for each synthetic device that the VM
|
||||
is configured to have. Each VMbus device type has a fixed GUID
|
||||
known as the "class ID", and each VMbus device instance is also
|
||||
is configured to have. Each VMBus device type has a fixed GUID
|
||||
known as the "class ID", and each VMBus device instance is also
|
||||
identified by a GUID. The offer message from Hyper-V contains
|
||||
both GUIDs to uniquely (within the VM) identify the device.
|
||||
There is one offer message for each device instance, so a VM with
|
||||
@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
|
||||
the device. Driver/device matching is performed using the standard
|
||||
Linux mechanism.
|
||||
|
||||
The device driver probe function opens the primary VMbus channel to
|
||||
The device driver probe function opens the primary VMBus channel to
|
||||
the corresponding VSP. It allocates guest memory for the channel
|
||||
ring buffers and shares the ring buffer with the Hyper-V host by
|
||||
giving the host a list of GPAs for the ring buffer memory. See
|
||||
@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
|
||||
setup messages via the primary channel. These messages may include
|
||||
negotiating the device protocol version to be used between the Linux
|
||||
VSC and the VSP on the Hyper-V host. The setup messages may also
|
||||
include creating additional VMbus channels, which are somewhat
|
||||
include creating additional VMBus channels, which are somewhat
|
||||
mis-named as "sub-channels" since they are functionally
|
||||
equivalent to the primary channel once they are created.
|
||||
|
||||
|
MAINTAINERS | 32
@ -3980,7 +3980,7 @@ R: Song Liu <song@kernel.org>
|
||||
R: Yonghong Song <yonghong.song@linux.dev>
|
||||
R: John Fastabend <john.fastabend@gmail.com>
|
||||
R: KP Singh <kpsingh@kernel.org>
|
||||
R: Stanislav Fomichev <sdf@google.com>
|
||||
R: Stanislav Fomichev <sdf@fomichev.me>
|
||||
R: Hao Luo <haoluo@google.com>
|
||||
R: Jiri Olsa <jolsa@kernel.org>
|
||||
L: bpf@vger.kernel.org
|
||||
@ -5295,7 +5295,7 @@ F: drivers/infiniband/hw/usnic/
|
||||
|
||||
CLANG CONTROL FLOW INTEGRITY SUPPORT
|
||||
M: Sami Tolvanen <samitolvanen@google.com>
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Nathan Chancellor <nathan@kernel.org>
|
||||
L: llvm@lists.linux.dev
|
||||
S: Supported
|
||||
@ -8211,7 +8211,7 @@ F: rust/kernel/net/phy.rs
|
||||
|
||||
EXEC & BINFMT API, ELF
|
||||
R: Eric Biederman <ebiederm@xmission.com>
|
||||
R: Kees Cook <keescook@chromium.org>
|
||||
R: Kees Cook <kees@kernel.org>
|
||||
L: linux-mm@kvack.org
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
|
||||
@ -8612,7 +8612,7 @@ S: Maintained
|
||||
F: drivers/net/ethernet/nvidia/*
|
||||
|
||||
FORTIFY_SOURCE
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
|
||||
@ -9102,7 +9102,7 @@ F: include/linux/mfd/gsc.h
|
||||
F: include/linux/platform_data/gsc_hwmon.h
|
||||
|
||||
GCC PLUGINS
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
|
||||
@ -9236,7 +9236,7 @@ S: Maintained
|
||||
F: drivers/input/touchscreen/resistive-adc-touch.c
|
||||
|
||||
GENERIC STRING LIBRARY
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Andy Shevchenko <andy@kernel.org>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
S: Supported
|
||||
@ -11950,7 +11950,7 @@ F: scripts/package/
|
||||
F: usr/
|
||||
|
||||
KERNEL HARDENING (not covered by other areas)
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Gustavo A. R. Silva <gustavoars@kernel.org>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
S: Supported
|
||||
@ -12382,7 +12382,6 @@ F: drivers/video/backlight/ktz8866.c
|
||||
|
||||
KVM PARAVIRT (KVM/paravirt)
|
||||
M: Paolo Bonzini <pbonzini@redhat.com>
|
||||
R: Wanpeng Li <wanpengli@tencent.com>
|
||||
R: Vitaly Kuznetsov <vkuznets@redhat.com>
|
||||
L: kvm@vger.kernel.org
|
||||
S: Supported
|
||||
@ -12478,7 +12477,7 @@ F: drivers/scsi/53c700*
|
||||
|
||||
LEAKING_ADDRESSES
|
||||
M: Tycho Andersen <tycho@tycho.pizza>
|
||||
R: Kees Cook <keescook@chromium.org>
|
||||
R: Kees Cook <kees@kernel.org>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
|
||||
@ -12774,7 +12773,7 @@ F: arch/powerpc/platforms/8xx/
|
||||
F: arch/powerpc/platforms/83xx/
|
||||
|
||||
LINUX KERNEL DUMP TEST MODULE (LKDTM)
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
S: Maintained
|
||||
F: drivers/misc/lkdtm/*
|
||||
F: tools/testing/selftests/lkdtm/*
|
||||
@ -12904,7 +12903,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
|
||||
F: drivers/media/usb/dvb-usb-v2/lmedm04*
|
||||
|
||||
LOADPIN SECURITY MODULE
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
|
||||
F: Documentation/admin-guide/LSM/LoadPin.rst
|
||||
@ -17996,7 +17995,7 @@ F: tools/testing/selftests/proc/
|
||||
|
||||
PROC SYSCTL
|
||||
M: Luis Chamberlain <mcgrof@kernel.org>
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
M: Joel Granados <j.granados@samsung.com>
|
||||
L: linux-kernel@vger.kernel.org
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
@ -18052,7 +18051,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
|
||||
F: drivers/net/pse-pd/
|
||||
|
||||
PSTORE FILESYSTEM
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Tony Luck <tony.luck@intel.com>
|
||||
R: Guilherme G. Piccoli <gpiccoli@igalia.com>
|
||||
L: linux-hardening@vger.kernel.org
|
||||
@ -20058,7 +20057,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
|
||||
F: drivers/media/cec/platform/seco/seco-cec.h
|
||||
|
||||
SECURE COMPUTING
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Andy Lutomirski <luto@amacapital.net>
|
||||
R: Will Drewry <wad@chromium.org>
|
||||
S: Supported
|
||||
@ -22972,7 +22971,7 @@ F: drivers/block/ublk_drv.c
|
||||
F: include/uapi/linux/ublk_cmd.h
|
||||
|
||||
UBSAN
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
R: Marco Elver <elver@google.com>
|
||||
R: Andrey Konovalov <andreyknvl@gmail.com>
|
||||
R: Andrey Ryabinin <ryabinin.a.a@gmail.com>
|
||||
@ -23974,7 +23973,6 @@ VMALLOC
|
||||
M: Andrew Morton <akpm@linux-foundation.org>
|
||||
R: Uladzislau Rezki <urezki@gmail.com>
|
||||
R: Christoph Hellwig <hch@infradead.org>
|
||||
R: Lorenzo Stoakes <lstoakes@gmail.com>
|
||||
L: linux-mm@kvack.org
|
||||
S: Maintained
|
||||
W: http://www.linux-mm.org
|
||||
@ -24810,7 +24808,7 @@ F: drivers/net/hamradio/yam*
|
||||
F: include/linux/yam.h
|
||||
|
||||
YAMA SECURITY MODULE
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
M: Kees Cook <kees@kernel.org>
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
|
||||
F: Documentation/admin-guide/LSM/Yama.rst
|
||||
|
Makefile | 2
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@ -85,7 +85,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
panel {
|
||||
panel_dpi: panel {
|
||||
compatible = "sii,43wvf1g";
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_display_power>;
|
||||
|
@ -10,8 +10,6 @@
|
||||
/plugin/;
|
||||
|
||||
&{/} {
|
||||
/delete-node/ panel;
|
||||
|
||||
hdmi: connector-hdmi {
|
||||
compatible = "hdmi-connector";
|
||||
label = "hdmi";
|
||||
@ -82,6 +80,10 @@
|
||||
};
|
||||
};
|
||||
|
||||
&panel_dpi {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
&tve {
|
||||
status = "disabled";
|
||||
};
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#ifdef CONFIG_EFI
|
||||
void efi_init(void);
|
||||
@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
|
||||
#define arch_efi_call_virt_setup() efi_virtmap_load()
|
||||
#define arch_efi_call_virt_teardown() efi_virtmap_unload()
|
||||
|
||||
#ifdef CONFIG_CPU_TTBR0_PAN
|
||||
#undef arch_efi_call_virt
|
||||
#define arch_efi_call_virt(p, f, args...) ({ \
|
||||
unsigned int flags = uaccess_save_and_enable(); \
|
||||
efi_status_t res = _Generic((p)->f(args), \
|
||||
efi_status_t: (p)->f(args), \
|
||||
default: ((p)->f(args), EFI_ABORTED)); \
|
||||
uaccess_restore(flags); \
|
||||
res; \
|
||||
})
|
||||
#endif
|
||||
|
||||
#define ARCH_EFI_IRQ_FLAGS_MASK \
|
||||
(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
|
||||
PSR_T_BIT | MODE_MASK)
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <dt-bindings/phy/phy-imx8-pcie.h>
|
||||
#include <dt-bindings/pwm/pwm.h>
|
||||
#include "imx8mm.dtsi"
|
||||
#include "imx8mm-overdrive.dtsi"
|
||||
|
||||
/ {
|
||||
chosen {
|
||||
@ -935,7 +936,7 @@
|
||||
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
|
||||
pinctrl_gpio_9_dsi: gpio9dsigrp {
|
||||
fsl,pins =
|
||||
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
|
||||
<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
|
||||
};
|
||||
|
||||
/* Verdin GPIO_10_DSI (pulled-up as active-low) */
|
||||
|
@ -254,7 +254,7 @@
|
||||
<&clk IMX8MP_CLK_CLKOUT2>,
|
||||
<&clk IMX8MP_AUDIO_PLL2_OUT>;
|
||||
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
|
||||
assigned-clock-rates = <13000000>, <13000000>, <156000000>;
|
||||
assigned-clock-rates = <13000000>, <13000000>, <208000000>;
|
||||
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
|
||||
status = "disabled";
|
||||
|
||||
|
@ -219,7 +219,7 @@
|
||||
|
||||
bluetooth {
|
||||
compatible = "brcm,bcm4330-bt";
|
||||
shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
|
||||
shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -36,7 +36,7 @@
|
||||
regulator-name = "SD1_SPWR";
|
||||
regulator-min-microvolt = <3000000>;
|
||||
regulator-max-microvolt = <3000000>;
|
||||
gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
|
||||
gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
|
||||
enable-active-high;
|
||||
};
|
||||
|
||||
|
@ -296,7 +296,6 @@
|
||||
vmmc-supply = <®_usdhc2_vmmc>;
|
||||
bus-width = <4>;
|
||||
status = "okay";
|
||||
no-sdio;
|
||||
no-mmc;
|
||||
};
|
||||
|
||||
|
@ -9,6 +9,7 @@
|
||||
|
||||
#include <linux/efi.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
@ -213,6 +214,7 @@ l: if (!p) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
kmemleak_not_leak(p);
|
||||
efi_rt_stack_top = p + THREAD_SIZE;
|
||||
return 0;
|
||||
}
|
||||
|
@ -177,6 +177,14 @@ static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
|
||||
res);
|
||||
}
|
||||
|
||||
static void ffa_rx_release(struct arm_smccc_res *res)
|
||||
{
|
||||
arm_smccc_1_1_smc(FFA_RX_RELEASE,
|
||||
0, 0,
|
||||
0, 0, 0, 0, 0,
|
||||
res);
|
||||
}
|
||||
|
||||
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
|
||||
struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
@ -543,16 +551,19 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
|
||||
if (WARN_ON(offset > len ||
|
||||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
|
||||
ret = FFA_RET_ABORTED;
|
||||
ffa_rx_release(res);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (len > ffa_desc_buf.len) {
|
||||
ret = FFA_RET_NO_MEMORY;
|
||||
ffa_rx_release(res);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
buf = ffa_desc_buf.buf;
|
||||
memcpy(buf, hyp_buffers.rx, fraglen);
|
||||
ffa_rx_release(res);
|
||||
|
||||
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
|
||||
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
|
||||
@ -563,6 +574,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
|
||||
|
||||
fraglen = res->a3;
|
||||
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
|
||||
ffa_rx_release(res);
|
||||
}
|
||||
|
||||
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
|
||||
|
@ -391,7 +391,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
|
||||
|
||||
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
|
||||
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
|
||||
vgic_v3_free_redist_region(rdreg);
|
||||
vgic_v3_free_redist_region(kvm, rdreg);
|
||||
INIT_LIST_HEAD(&dist->rd_regions);
|
||||
} else {
|
||||
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
|
||||
|
@ -919,8 +919,19 @@ free:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
|
||||
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
unsigned long c;
|
||||
|
||||
lockdep_assert_held(&kvm->arch.config_lock);
|
||||
|
||||
/* Garbage collect the region */
|
||||
kvm_for_each_vcpu(c, vcpu, kvm) {
|
||||
if (vcpu->arch.vgic_cpu.rdreg == rdreg)
|
||||
vcpu->arch.vgic_cpu.rdreg = NULL;
|
||||
}
|
||||
|
||||
list_del(&rdreg->list);
|
||||
kfree(rdreg);
|
||||
}
|
||||
@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
|
||||
vgic_v3_free_redist_region(rdreg);
|
||||
vgic_v3_free_redist_region(kvm, rdreg);
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
return ret;
|
||||
}
|
||||
|
@ -316,7 +316,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
|
||||
|
||||
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
|
||||
u32 index);
|
||||
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
|
||||
void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
|
||||
|
||||
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
|
||||
|
||||
|
@ -143,7 +143,7 @@ config LOONGARCH
|
||||
select HAVE_LIVEPATCH
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select HAVE_NMI
|
||||
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
|
||||
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
|
||||
select HAVE_PCI
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_REGS
|
||||
@ -261,6 +261,9 @@ config AS_HAS_EXPLICIT_RELOCS
|
||||
config AS_HAS_FCSR_CLASS
|
||||
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
|
||||
|
||||
config AS_HAS_THIN_ADD_SUB
|
||||
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
|
||||
|
||||
config AS_HAS_LSX_EXTENSION
|
||||
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
|
||||
|
||||
|
@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
|
||||
|
||||
config UNWINDER_ORC
|
||||
bool "ORC unwinder"
|
||||
depends on HAVE_OBJTOOL
|
||||
select OBJTOOL
|
||||
help
|
||||
This option enables the ORC (Oops Rewind Capability) unwinder for
|
||||
|
@ -75,6 +75,8 @@ do { \
|
||||
#define CSR_MWPC_NUM 0x3f
|
||||
|
||||
#define CTRL_PLV_ENABLE 0x1e
|
||||
#define CTRL_PLV0_ENABLE 0x02
|
||||
#define CTRL_PLV3_ENABLE 0x10
|
||||
|
||||
#define MWPnCFG3_LoadEn 8
|
||||
#define MWPnCFG3_StoreEn 9
|
||||
@ -101,7 +103,7 @@ struct perf_event;
|
||||
struct perf_event_attr;
|
||||
|
||||
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
|
||||
int *gen_len, int *gen_type, int *offset);
|
||||
int *gen_len, int *gen_type);
|
||||
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
|
||||
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
|
||||
const struct perf_event_attr *attr,
|
||||
|
@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
|
||||
static int hw_breakpoint_control(struct perf_event *bp,
|
||||
enum hw_breakpoint_ops ops)
|
||||
{
|
||||
u32 ctrl;
|
||||
u32 ctrl, privilege;
|
||||
int i, max_slots, enable;
|
||||
struct pt_regs *regs;
|
||||
struct perf_event **slots;
|
||||
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
|
||||
|
||||
if (arch_check_bp_in_kernelspace(info))
|
||||
privilege = CTRL_PLV0_ENABLE;
|
||||
else
|
||||
privilege = CTRL_PLV3_ENABLE;
|
||||
|
||||
/* Whether bp belongs to a task. */
|
||||
if (bp->hw.target)
|
||||
regs = task_pt_regs(bp->hw.target);
|
||||
|
||||
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
|
||||
/* Breakpoint */
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
|
||||
switch (ops) {
|
||||
case HW_BREAKPOINT_INSTALL:
|
||||
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
|
||||
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
|
||||
} else {
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
|
||||
ctrl = encode_ctrl_reg(info->ctrl);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
|
||||
}
|
||||
enable = csr_read64(LOONGARCH_CSR_CRMD);
|
||||
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
|
||||
if (bp->hw.target)
|
||||
regs->csr_prmd |= CSR_PRMD_PWE;
|
||||
break;
|
||||
case HW_BREAKPOINT_UNINSTALL:
|
||||
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
|
||||
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
|
||||
} else {
|
||||
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
|
||||
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
|
||||
}
|
||||
if (bp->hw.target)
|
||||
regs->csr_prmd &= ~CSR_PRMD_PWE;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
|
||||
* to generic breakpoint descriptions.
|
||||
*/
|
||||
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
|
||||
int *gen_len, int *gen_type, int *offset)
|
||||
int *gen_len, int *gen_type)
|
||||
{
|
||||
/* Type */
|
||||
switch (ctrl.type) {
|
||||
@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!ctrl.len)
|
||||
return -EINVAL;
|
||||
|
||||
*offset = __ffs(ctrl.len);
|
||||
|
||||
/* Len */
|
||||
switch (ctrl.len) {
|
||||
case LOONGARCH_BREAKPOINT_LEN_1:
|
||||
@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
|
||||
struct arch_hw_breakpoint *hw)
|
||||
{
|
||||
int ret;
|
||||
u64 alignment_mask, offset;
|
||||
u64 alignment_mask;
|
||||
|
||||
/* Build the arch_hw_breakpoint. */
|
||||
ret = arch_build_bp_info(bp, attr, hw);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
|
||||
alignment_mask = 0x7;
|
||||
else
|
||||
if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
|
||||
alignment_mask = 0x3;
|
||||
offset = hw->address & alignment_mask;
|
||||
|
||||
hw->address &= ~alignment_mask;
|
||||
hw->ctrl.len <<= offset;
|
||||
hw->address &= ~alignment_mask;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
|
||||
slots = this_cpu_ptr(bp_on_reg);
|
||||
|
||||
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
|
||||
bp = slots[i];
|
||||
if (bp == NULL)
|
||||
continue;
|
||||
perf_bp_event(bp, regs);
|
||||
if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
|
||||
bp = slots[i];
|
||||
if (bp == NULL)
|
||||
continue;
|
||||
perf_bp_event(bp, regs);
|
||||
csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
|
||||
update_bp_registers(regs, 0, 0);
|
||||
}
|
||||
}
|
||||
update_bp_registers(regs, 0, 0);
|
||||
}
|
||||
NOKPROBE_SYMBOL(breakpoint_handler);
|
||||
|
||||
@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
|
||||
slots = this_cpu_ptr(wp_on_reg);
|
||||
|
||||
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
|
||||
wp = slots[i];
|
||||
if (wp == NULL)
|
||||
continue;
|
||||
perf_bp_event(wp, regs);
|
||||
if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
|
||||
wp = slots[i];
|
||||
if (wp == NULL)
|
||||
continue;
|
||||
perf_bp_event(wp, regs);
|
||||
csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
|
||||
update_bp_registers(regs, 0, 1);
|
||||
}
|
||||
}
|
||||
update_bp_registers(regs, 0, 1);
|
||||
}
|
||||
NOKPROBE_SYMBOL(watchpoint_handler);
|
||||
|
||||
|
@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
|
||||
struct arch_hw_breakpoint_ctrl ctrl,
|
||||
struct perf_event_attr *attr)
|
||||
{
|
||||
int err, len, type, offset;
|
||||
int err, len, type;
|
||||
|
||||
err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
|
||||
err = arch_bp_generic_fields(ctrl, &len, &type);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
switch (note_type) {
|
||||
case NT_LOONGARCH_HW_BREAK:
|
||||
if ((type & HW_BREAKPOINT_X) != type)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case NT_LOONGARCH_HW_WATCH:
|
||||
if ((type & HW_BREAKPOINT_RW) != type)
|
||||
return -EINVAL;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
attr->bp_len = len;
|
||||
attr->bp_type = type;
|
||||
attr->bp_addr += offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
|
||||
return PTR_ERR(bp);
|
||||
|
||||
attr = bp->attr;
|
||||
decode_ctrl_reg(uctrl, &ctrl);
|
||||
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
switch (note_type) {
|
||||
case NT_LOONGARCH_HW_BREAK:
|
||||
ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
|
||||
ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
|
||||
break;
|
||||
case NT_LOONGARCH_HW_WATCH:
|
||||
decode_ctrl_reg(uctrl, &ctrl);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uctrl & CTRL_PLV_ENABLE) {
|
||||
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
|
||||
if (err)
|
||||
return err;
|
||||
attr.disabled = 0;
|
||||
} else {
|
||||
attr.disabled = 1;
|
||||
}
|
||||
|
||||
return modify_user_hw_breakpoint(bp, &attr);
|
||||
}
|
||||
@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
|
||||
struct perf_event *bp;
|
||||
struct perf_event_attr attr;
|
||||
|
||||
/* Kernel-space address cannot be monitored by user-space */
|
||||
if ((unsigned long)addr >= XKPRANGE)
|
||||
return -EINVAL;
|
||||
|
||||
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
|
||||
if (IS_ERR(bp))
|
||||
return PTR_ERR(bp);
|
||||
|
@ -761,7 +761,7 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
|
||||
default:
|
||||
ret = KVM_HCALL_INVALID_CODE;
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
|
||||
}
|
||||
|
@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
|
||||
* RAC flush causes kernel panics on BCM6358 when booting from TP1
|
||||
* because the bootloader is not initializing it properly.
|
||||
*/
|
||||
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
|
||||
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
|
||||
!!BMIPS_GET_CBR();
|
||||
}
|
||||
|
||||
static void bcm6368_quirks(void)
|
||||
|
@ -322,7 +322,7 @@ static inline void ehb(void)
|
||||
" .set push \n" \
|
||||
" .set "MIPS_ISA_LEVEL" \n" \
|
||||
_ASM_SET_MFTC0 \
|
||||
" mftc0 $1, " #rt ", " #sel " \n" \
|
||||
" mftc0 %0, " #rt ", " #sel " \n" \
|
||||
_ASM_UNSET_MFTC0 \
|
||||
" .set pop \n" \
|
||||
: "=r" (__res)); \
|
||||
|
@ -27,7 +27,7 @@
|
||||
17 o32 break sys_ni_syscall
|
||||
# 18 was sys_stat
|
||||
18 o32 unused18 sys_ni_syscall
|
||||
19 o32 lseek sys_lseek
|
||||
19 o32 lseek sys_lseek compat_sys_lseek
|
||||
20 o32 getpid sys_getpid
|
||||
21 o32 mount sys_mount
|
||||
22 o32 umount sys_oldumount
|
||||
|
@ -112,8 +112,8 @@ retry:
|
||||
* gives them time to settle
|
||||
*/
|
||||
if (where == PCI_VENDOR_ID) {
|
||||
if (ret == 0xffffffff || ret == 0x00000000 ||
|
||||
ret == 0x0000ffff || ret == 0xffff0000) {
|
||||
if (*val == 0xffffffff || *val == 0x00000000 ||
|
||||
*val == 0x0000ffff || *val == 0xffff0000) {
|
||||
if (delay > 4)
|
||||
return 0;
|
||||
delay *= 2;
|
||||
|
2
arch/powerpc/crypto/.gitignore
vendored
@ -1,3 +1,5 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
aesp10-ppc.S
|
||||
aesp8-ppc.S
|
||||
ghashp10-ppc.S
|
||||
ghashp8-ppc.S
|
||||
|
@ -130,14 +130,16 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
fdput(f);
|
||||
|
||||
if (!found)
|
||||
if (!found) {
|
||||
fdput(f);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
table_group = iommu_group_get_iommudata(grp);
|
||||
if (WARN_ON(!table_group))
|
||||
if (WARN_ON(!table_group)) {
|
||||
fdput(f);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
|
||||
struct iommu_table *tbltmp = table_group->tables[i];
|
||||
@ -158,8 +160,10 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!tbl)
|
||||
if (!tbl) {
|
||||
fdput(f);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
|
||||
@ -170,6 +174,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
/* stit is being destroyed */
|
||||
iommu_tce_table_put(tbl);
|
||||
rcu_read_unlock();
|
||||
fdput(f);
|
||||
return -ENOTTY;
|
||||
}
|
||||
/*
|
||||
@ -177,6 +182,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
* its KVM reference counter and can return.
|
||||
*/
|
||||
rcu_read_unlock();
|
||||
fdput(f);
|
||||
return 0;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
@ -184,6 +190,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
|
||||
if (!stit) {
|
||||
iommu_tce_table_put(tbl);
|
||||
fdput(f);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -192,6 +199,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||
|
||||
list_add_rcu(&stit->next, &stt->iommu_tables);
|
||||
|
||||
fdput(f);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -45,6 +45,7 @@
|
||||
no-1-8-v;
|
||||
no-mmc;
|
||||
no-sdio;
|
||||
disable-wp;
|
||||
};
|
||||
|
||||
&uart0 {
|
||||
|
@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
|
||||
struct efi_memory_map_data *data);
|
||||
extern void __efi_memmap_free(u64 phys, unsigned long size,
|
||||
unsigned long flags);
|
||||
#define __efi_memmap_free __efi_memmap_free
|
||||
|
||||
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
|
||||
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
|
||||
|
@ -519,7 +519,8 @@ void free_rmid(u32 closid, u32 rmid)
|
||||
* allows architectures that ignore the closid parameter to avoid an
|
||||
* unnecessary check.
|
||||
*/
|
||||
if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
|
||||
if (!resctrl_arch_mon_capable() ||
|
||||
idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
|
||||
RESCTRL_RESERVED_RMID))
|
||||
return;
|
||||
|
||||
|
@ -2843,7 +2843,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
|
||||
if (sev_es_prevent_msr_access(vcpu, msr_info)) {
|
||||
msr_info->data = 0;
|
||||
return -EINVAL;
|
||||
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
switch (msr_info->index) {
|
||||
@ -2998,7 +2998,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
u64 data = msr->data;
|
||||
|
||||
if (sev_es_prevent_msr_access(vcpu, msr))
|
||||
return -EINVAL;
|
||||
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
|
||||
|
||||
switch (ecx) {
|
||||
case MSR_AMD64_TSC_RATIO:
|
||||
|
@ -10718,13 +10718,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
|
||||
|
||||
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
|
||||
|
||||
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||
|
||||
if (irqchip_split(vcpu->kvm))
|
||||
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
|
||||
else {
|
||||
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
|
||||
if (ioapic_in_kernel(vcpu->kvm))
|
||||
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
|
||||
}
|
||||
else if (ioapic_in_kernel(vcpu->kvm))
|
||||
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
|
||||
|
||||
if (is_guest_mode(vcpu))
|
||||
vcpu->arch.load_eoi_exitmap_pending = true;
|
||||
|
@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
|
||||
*/
|
||||
int __init efi_memmap_install(struct efi_memory_map_data *data)
|
||||
{
|
||||
unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
|
||||
unsigned long flags = efi.memmap.flags;
|
||||
u64 phys = efi.memmap.phys_map;
|
||||
int ret;
|
||||
|
||||
efi_memmap_unmap();
|
||||
|
||||
if (efi_enabled(EFI_PARAVIRT))
|
||||
return 0;
|
||||
|
||||
return __efi_memmap_init(data);
|
||||
ret = __efi_memmap_init(data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
__efi_memmap_free(phys, size, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
|
||||
struct acpi_mem_mapping *mm = mem_info->cur_mm;
|
||||
u32 length;
|
||||
acpi_size map_length;
|
||||
acpi_size page_boundary_map_length;
|
||||
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
|
||||
u32 remainder;
|
||||
#endif
|
||||
@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
|
||||
map_length = (acpi_size)
|
||||
((mem_info->address + mem_info->length) - address);
|
||||
|
||||
/*
|
||||
* If mapping the entire remaining portion of the region will cross
|
||||
* a page boundary, just map up to the page boundary, do not cross.
|
||||
* On some systems, crossing a page boundary while mapping regions
|
||||
* can cause warnings if the pages have different attributes
|
||||
* due to resource management.
|
||||
*
|
||||
* This has the added benefit of constraining a single mapping to
|
||||
* one page, which is similar to the original code that used a 4k
|
||||
* maximum window.
|
||||
*/
|
||||
page_boundary_map_length = (acpi_size)
|
||||
(ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
|
||||
if (page_boundary_map_length == 0) {
|
||||
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
|
||||
}
|
||||
|
||||
if (map_length > page_boundary_map_length) {
|
||||
map_length = page_boundary_map_length;
|
||||
}
|
||||
if (map_length > ACPI_DEFAULT_PAGE_SIZE)
|
||||
map_length = ACPI_DEFAULT_PAGE_SIZE;
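For illustration (addresses invented, and assuming the usual 4 KiB ACPI_DEFAULT_PAGE_SIZE): with address = 0x10000800 and a large remaining region, the removed logic mapped only up to the next page boundary at 0x10001000, a 0x800-byte window, whereas the new check simply caps the window at 0x1000 bytes; either way a single mapping is never larger than one page.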
|
||||
|
||||
/* Create a new mapping starting at the address given */
|
||||
|
||||
|
@ -302,6 +302,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
|
||||
void acpi_mipi_scan_crs_csi2(void);
|
||||
void acpi_mipi_init_crs_csi2_swnodes(void);
|
||||
void acpi_mipi_crs_csi2_cleanup(void);
|
||||
#ifdef CONFIG_X86
|
||||
bool acpi_graph_ignore_port(acpi_handle handle);
|
||||
#else
|
||||
static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
|
||||
#endif
|
||||
|
||||
#endif /* _ACPI_INTERNAL_H_ */
|
||||
|
@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
|
||||
acpi_mipi_del_crs_csi2(csi2);
|
||||
}
|
||||
|
||||
static const struct dmi_system_id dmi_ignore_port_nodes[] = {
|
||||
{
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
#ifdef CONFIG_X86
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/intel-family.h>
|
||||
|
||||
/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
|
||||
static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
|
||||
X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
|
||||
X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
|
||||
X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
|
||||
X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
|
||||
{}
|
||||
};
|
||||
|
||||
static const char *strnext(const char *s1, const char *s2)
|
||||
@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
|
||||
static bool dmi_tested, ignore_port;
|
||||
|
||||
if (!dmi_tested) {
|
||||
ignore_port = dmi_first_match(dmi_ignore_port_nodes);
|
||||
if (dmi_name_in_vendors("Dell Inc.") &&
|
||||
x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
|
||||
ignore_port = true;
|
||||
|
||||
dmi_tested = true;
|
||||
}
|
||||
|
||||
@ -794,3 +803,4 @@ out_free:
|
||||
kfree(orig_path);
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
@ -1735,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
|
||||
if (ap->pflags & ATA_PFLAG_EXTERNAL)
|
||||
return;
|
||||
|
||||
/* If no LPM states are supported by the HBA, do not bother with LPM */
|
||||
if ((ap->host->flags & ATA_HOST_NO_PART) &&
|
||||
(ap->host->flags & ATA_HOST_NO_SSC) &&
|
||||
(ap->host->flags & ATA_HOST_NO_DEVSLP)) {
|
||||
ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* user modified policy via module param */
|
||||
if (mobile_lpm_policy != -1) {
|
||||
policy = mobile_lpm_policy;
|
||||
|
@ -3101,8 +3101,53 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	int ret, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}

/**
 * regmap_bulk_read() - Read multiple registers from the device
 * regmap_bulk_read() - Read multiple sequential registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
@ -3132,47 +3177,35 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}

	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
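A minimal usage sketch of the sequential read path documented above; the 0x40 base address, the 16-register window and the 8-bit value size are assumptions made for this example only, not anything the patch requires.

#include <linux/printk.h>
#include <linux/regmap.h>

/* Sketch: dump 16 consecutive 8-bit registers starting at 0x40 (illustrative). */
static int example_dump_block(struct regmap *map)
{
	u8 buf[16];	/* one element per register, matching the map's val_bytes */
	int ret;

	ret = regmap_bulk_read(map, 0x40, buf, ARRAY_SIZE(buf));
	if (ret)
		return ret;

	print_hex_dump_bytes("regs: ", DUMP_PREFIX_OFFSET, buf, sizeof(buf));
	return 0;
}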

/**
 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
 *
 * @map: Register map to read from
 * @regs: Array of registers to read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
			  size_t val_count)
{
	if (val_count == 0)
		return -EINVAL;

	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
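And a matching sketch for the new non-sequential call; the three register addresses and the 16-bit value size are invented for illustration.

#include <linux/regmap.h>

/* Sketch: sample three scattered 16-bit registers in one locked operation. */
static int example_read_status(struct regmap *map, u16 *out /* 3 entries */)
{
	unsigned int regs[] = { 0x02, 0x10, 0x23 };	/* illustrative addresses */

	/* All reads are performed under a single regmap lock, so another
	 * regmap user cannot interleave between them. */
	return regmap_multi_reg_read(map, regs, out, ARRAY_SIZE(regs));
}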
|
||||
|
||||
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
|
||||
unsigned int mask, unsigned int val,
|
||||
bool *change, bool force_write)
|
||||
|
@ -394,7 +394,7 @@ config LS2X_APB_DMA
|
||||
|
||||
config MCF_EDMA
|
||||
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
|
||||
depends on M5441x || COMPILE_TEST
|
||||
depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
|
||||
select DMA_ENGINE
|
||||
select DMA_VIRTUAL_CHANNELS
|
||||
help
|
||||
|
@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
|
||||
|
||||
spin_unlock(&irq_entry->list_lock);
|
||||
|
||||
list_for_each_entry(desc, &flist, list) {
|
||||
list_for_each_entry_safe(desc, n, &flist, list) {
|
||||
/*
|
||||
* Check against the original status as ABORT is software defined
|
||||
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
|
||||
*/
|
||||
list_del(&desc->list);
|
||||
|
||||
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
|
||||
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
|
||||
continue;
|
||||
|
@ -534,18 +534,6 @@ err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ioat_register(struct ioatdma_device *ioat_dma)
|
||||
{
|
||||
int err = dma_async_device_register(&ioat_dma->dma_dev);
|
||||
|
||||
if (err) {
|
||||
ioat_disable_interrupts(ioat_dma);
|
||||
dma_pool_destroy(ioat_dma->completion_pool);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
|
||||
{
|
||||
struct dma_device *dma = &ioat_dma->dma_dev;
|
||||
@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
|
||||
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
|
||||
}
|
||||
|
||||
err = ioat_register(ioat_dma);
|
||||
err = dma_async_device_register(&ioat_dma->dma_dev);
|
||||
if (err)
|
||||
return err;
|
||||
goto err_disable_interrupts;
|
||||
|
||||
ioat_kobject_add(ioat_dma, &ioat_ktype);
|
||||
|
||||
@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
|
||||
|
||||
/* disable relaxed ordering */
|
||||
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
|
||||
if (err)
|
||||
return pcibios_err_to_errno(err);
|
||||
if (err) {
|
||||
err = pcibios_err_to_errno(err);
|
||||
goto err_disable_interrupts;
|
||||
}
|
||||
|
||||
/* clear relaxed ordering enable */
|
||||
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
|
||||
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
|
||||
if (err)
|
||||
return pcibios_err_to_errno(err);
|
||||
if (err) {
|
||||
err = pcibios_err_to_errno(err);
|
||||
goto err_disable_interrupts;
|
||||
}
|
||||
|
||||
if (ioat_dma->cap & IOAT_CAP_DPS)
|
||||
writeb(ioat_pending_level + 1,
|
||||
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
|
||||
|
||||
return 0;
|
||||
|
||||
err_disable_interrupts:
|
||||
ioat_disable_interrupts(ioat_dma);
|
||||
dma_pool_destroy(ioat_dma->completion_pool);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ioat_shutdown(struct pci_dev *pdev)
|
||||
@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
void __iomem * const *iomap;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ioatdma_device *device;
|
||||
unsigned int i;
|
||||
u8 version;
|
||||
int err;
|
||||
|
||||
err = pcim_enable_device(pdev);
|
||||
@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (!iomap)
|
||||
return -ENOMEM;
|
||||
|
||||
version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
|
||||
if (version < IOAT_VER_3_0)
|
||||
return -ENODEV;
|
||||
|
||||
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
|
||||
if (err)
|
||||
return err;
|
||||
@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pci_set_master(pdev);
|
||||
pci_set_drvdata(pdev, device);
|
||||
|
||||
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
|
||||
device->version = version;
|
||||
if (device->version >= IOAT_VER_3_4)
|
||||
ioat_dca_enabled = 0;
|
||||
if (device->version >= IOAT_VER_3_0) {
|
||||
if (is_skx_ioat(pdev))
|
||||
device->version = IOAT_VER_3_2;
|
||||
err = ioat3_dma_probe(device, ioat_dca_enabled);
|
||||
} else
|
||||
return -ENODEV;
|
||||
|
||||
if (is_skx_ioat(pdev))
|
||||
device->version = IOAT_VER_3_2;
|
||||
|
||||
err = ioat3_dma_probe(device, ioat_dca_enabled);
|
||||
if (err) {
|
||||
for (i = 0; i < IOAT_MAX_CHANS; i++)
|
||||
kfree(device->idx[i]);
|
||||
kfree(device);
|
||||
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
|
||||
static void __exit ioat_exit_module(void)
|
||||
{
|
||||
pci_unregister_driver(&ioat_pci_driver);
|
||||
kmem_cache_destroy(ioat_sed_cache);
|
||||
kmem_cache_destroy(ioat_cache);
|
||||
}
|
||||
module_exit(ioat_exit_module);
|
||||
|
@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
|
||||
|
||||
ret = of_k3_udma_glue_parse(udmax_np, common);
|
||||
if (ret)
|
||||
goto out_put_spec;
|
||||
return ret;
|
||||
|
||||
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
|
||||
|
||||
out_put_spec:
|
||||
of_node_put(udmax_np);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
|
||||
u32 st;
|
||||
bool repeat_tx;
|
||||
|
||||
spin_lock(&xchan->vchan.lock);
|
||||
|
||||
if (xchan->stop_requested)
|
||||
complete(&xchan->last_interrupt);
|
||||
|
||||
spin_lock(&xchan->vchan.lock);
|
||||
|
||||
/* get submitted request */
|
||||
vd = vchan_next_desc(&xchan->vchan);
|
||||
if (!vd)
|
||||
|
@ -15,10 +15,6 @@
|
||||
#include <asm/early_ioremap.h>
|
||||
#include <asm/efi.h>
|
||||
|
||||
#ifndef __efi_memmap_free
|
||||
#define __efi_memmap_free(phys, size, flags) do { } while (0)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* __efi_memmap_init - Common code for mapping the EFI memory map
|
||||
* @data: EFI memory map data
|
||||
@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
|
||||
__efi_memmap_free(efi.memmap.phys_map,
|
||||
efi.memmap.desc_size * efi.memmap.nr_map,
|
||||
efi.memmap.flags);
|
||||
|
||||
map.phys_map = data->phys_map;
|
||||
map.nr_map = data->size / data->desc_size;
|
||||
map.map_end = map.map + data->size;
|
||||
|
@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
|
||||
|
||||
static int psci_system_suspend(unsigned long unused)
|
||||
{
|
||||
int err;
|
||||
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
|
||||
|
||||
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
|
||||
err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
|
||||
pa_cpu_resume, 0, 0);
|
||||
return psci_to_linux_errno(err);
|
||||
}
|
||||
|
||||
static int psci_system_suspend_enter(suspend_state_t state)
|
||||
|
@ -41,8 +41,6 @@
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-fence-array.h>
|
||||
#include <linux/pci-p2pdma.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
/**
|
||||
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
|
||||
@ -58,42 +56,11 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
|
||||
struct drm_gem_object *obj = dmabuf->priv;
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
int r;
|
||||
|
||||
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
|
||||
attach->peer2peer = false;
|
||||
|
||||
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
trace_amdgpu_runpm_reference_dumps(1, __func__);
|
||||
if (r < 0)
|
||||
goto out;
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
trace_amdgpu_runpm_reference_dumps(0, __func__);
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
|
||||
*
|
||||
* @dmabuf: DMA-buf where we remove the attachment from
|
||||
* @attach: the attachment to remove
|
||||
*
|
||||
* Called when an attachment is removed from the DMA-buf.
|
||||
*/
|
||||
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct drm_gem_object *obj = dmabuf->priv;
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
trace_amdgpu_runpm_reference_dumps(0, __func__);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -267,7 +234,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
|
||||
|
||||
const struct dma_buf_ops amdgpu_dmabuf_ops = {
|
||||
.attach = amdgpu_dma_buf_attach,
|
||||
.detach = amdgpu_dma_buf_detach,
|
||||
.pin = amdgpu_dma_buf_pin,
|
||||
.unpin = amdgpu_dma_buf_unpin,
|
||||
.map_dma_buf = amdgpu_dma_buf_map,
|
||||
|
@ -181,7 +181,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
|
||||
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
||||
seq, flags | AMDGPU_FENCE_FLAG_INT);
|
||||
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
|
||||
trace_amdgpu_runpm_reference_dumps(1, __func__);
|
||||
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
|
||||
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
|
||||
struct dma_fence *old;
|
||||
@ -309,7 +308,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
|
||||
dma_fence_put(fence);
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
trace_amdgpu_runpm_reference_dumps(0, __func__);
|
||||
} while (last_seq != seq);
|
||||
|
||||
return true;
|
||||
|
@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
|
||||
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
|
||||
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
|
||||
unsigned int ndw;
|
||||
signed long r;
|
||||
int r;
|
||||
uint32_t seq;
|
||||
|
||||
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
|
||||
!down_read_trylock(&adev->reset_domain->sem)) {
|
||||
/*
|
||||
* A GPU reset should flush all TLBs anyway, so no need to do
|
||||
* this while one is ongoing.
|
||||
*/
|
||||
if (!down_read_trylock(&adev->reset_domain->sem))
|
||||
return 0;
|
||||
|
||||
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_2)
|
||||
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
|
||||
2, all_hub,
|
||||
@ -703,44 +708,41 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
|
||||
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
|
||||
flush_type, all_hub,
|
||||
inst);
|
||||
return 0;
|
||||
}
|
||||
r = 0;
|
||||
} else {
|
||||
/* 2 dwords flush + 8 dwords fence */
|
||||
ndw = kiq->pmf->invalidate_tlbs_size + 8;
|
||||
|
||||
/* 2 dwords flush + 8 dwords fence */
|
||||
ndw = kiq->pmf->invalidate_tlbs_size + 8;
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_2)
|
||||
ndw += kiq->pmf->invalidate_tlbs_size;
|
||||
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_2)
|
||||
ndw += kiq->pmf->invalidate_tlbs_size;
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_0)
|
||||
ndw += kiq->pmf->invalidate_tlbs_size;
|
||||
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_0)
|
||||
ndw += kiq->pmf->invalidate_tlbs_size;
|
||||
spin_lock(&adev->gfx.kiq[inst].ring_lock);
|
||||
amdgpu_ring_alloc(ring, ndw);
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_2)
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
|
||||
|
||||
spin_lock(&adev->gfx.kiq[inst].ring_lock);
|
||||
amdgpu_ring_alloc(ring, ndw);
|
||||
if (adev->gmc.flush_tlb_needs_extra_type_2)
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
|
||||
if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
|
||||
|
||||
if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
|
||||
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
||||
if (r) {
|
||||
amdgpu_ring_undo(ring);
|
||||
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
|
||||
goto error_unlock_reset;
|
||||
}
|
||||
|
||||
kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
|
||||
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
|
||||
if (r) {
|
||||
amdgpu_ring_undo(ring);
|
||||
amdgpu_ring_commit(ring);
|
||||
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
|
||||
goto error_unlock_reset;
|
||||
if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
|
||||
dev_err(adev->dev, "timeout waiting for kiq fence\n");
|
||||
r = -ETIME;
|
||||
}
|
||||
}
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
|
||||
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
|
||||
if (r < 1) {
|
||||
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
|
||||
r = -ETIME;
|
||||
goto error_unlock_reset;
|
||||
}
|
||||
r = 0;
|
||||
|
||||
error_unlock_reset:
|
||||
up_read(&adev->reset_domain->sem);
|
||||
return r;
|
||||
|
@ -554,21 +554,6 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
|
||||
__entry->value)
|
||||
);
|
||||
|
||||
TRACE_EVENT(amdgpu_runpm_reference_dumps,
|
||||
TP_PROTO(uint32_t index, const char *func),
|
||||
TP_ARGS(index, func),
|
||||
TP_STRUCT__entry(
|
||||
__field(uint32_t, index)
|
||||
__string(func, func)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->index = index;
|
||||
__assign_str(func);
|
||||
),
|
||||
TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
|
||||
__entry->index,
|
||||
__get_str(func))
|
||||
);
|
||||
#undef AMDGPU_JOB_GET_TIMELINE_NAME
|
||||
#endif
|
||||
|
||||
|
@ -4195,9 +4195,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
|
||||
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
|
||||
struct amdgpu_cu_info *cu_info)
|
||||
{
|
||||
int i, j, k, counter, xcc_id, active_cu_number = 0;
|
||||
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
|
||||
int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
|
||||
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
|
||||
unsigned disable_masks[4 * 4];
|
||||
bool is_symmetric_cus;
|
||||
|
||||
if (!adev || !cu_info)
|
||||
return -EINVAL;
|
||||
@ -4215,6 +4216,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
|
||||
is_symmetric_cus = true;
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
mask = 1;
|
||||
@ -4242,6 +4244,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
|
||||
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
|
||||
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
|
||||
}
|
||||
if (i && is_symmetric_cus && prev_counter != counter)
|
||||
is_symmetric_cus = false;
|
||||
prev_counter = counter;
|
||||
}
|
||||
if (is_symmetric_cus) {
|
||||
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
|
||||
tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
|
||||
tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
|
||||
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
|
||||
}
|
||||
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
|
||||
xcc_id);
|
||||
|
@ -154,18 +154,18 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
void *pkt, int size,
|
||||
int api_status_off)
|
||||
{
|
||||
int ndw = size / 4;
|
||||
signed long r;
|
||||
union MESAPI__MISC *x_pkt = pkt;
|
||||
struct MES_API_STATUS *api_status;
|
||||
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
|
||||
signed long timeout = 3000000; /* 3000 ms */
|
||||
struct amdgpu_device *adev = mes->adev;
|
||||
struct amdgpu_ring *ring = &mes->ring;
|
||||
unsigned long flags;
|
||||
signed long timeout = 3000000; /* 3000 ms */
|
||||
struct MES_API_STATUS *api_status;
|
||||
union MESAPI__MISC *x_pkt = pkt;
|
||||
const char *op_str, *misc_op_str;
|
||||
u32 fence_offset;
|
||||
u64 fence_gpu_addr;
|
||||
u64 *fence_ptr;
|
||||
unsigned long flags;
|
||||
u64 status_gpu_addr;
|
||||
u32 status_offset;
|
||||
u64 *status_ptr;
|
||||
signed long r;
|
||||
int ret;
|
||||
|
||||
if (x_pkt->header.opcode >= MES_SCH_API_MAX)
|
||||
@ -177,28 +177,38 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
|
||||
timeout = 15 * 600 * 1000;
|
||||
}
|
||||
BUG_ON(size % 4 != 0);
|
||||
|
||||
ret = amdgpu_device_wb_get(adev, &fence_offset);
|
||||
ret = amdgpu_device_wb_get(adev, &status_offset);
|
||||
if (ret)
|
||||
return ret;
|
||||
fence_gpu_addr =
|
||||
adev->wb.gpu_addr + (fence_offset * 4);
|
||||
fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
|
||||
*fence_ptr = 0;
|
||||
|
||||
status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
|
||||
status_ptr = (u64 *)&adev->wb.wb[status_offset];
|
||||
*status_ptr = 0;
|
||||
|
||||
spin_lock_irqsave(&mes->ring_lock, flags);
|
||||
if (amdgpu_ring_alloc(ring, ndw)) {
|
||||
spin_unlock_irqrestore(&mes->ring_lock, flags);
|
||||
amdgpu_device_wb_free(adev, fence_offset);
|
||||
return -ENOMEM;
|
||||
}
|
||||
r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
|
||||
if (r)
|
||||
goto error_unlock_free;
|
||||
|
||||
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
|
||||
api_status->api_completion_fence_addr = fence_gpu_addr;
|
||||
api_status->api_completion_fence_addr = status_gpu_addr;
|
||||
api_status->api_completion_fence_value = 1;
|
||||
|
||||
amdgpu_ring_write_multiple(ring, pkt, ndw);
|
||||
amdgpu_ring_write_multiple(ring, pkt, size / 4);
|
||||
|
||||
memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
|
||||
mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
|
||||
mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
|
||||
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
|
||||
mes_status_pkt.api_status.api_completion_fence_addr =
|
||||
ring->fence_drv.gpu_addr;
|
||||
mes_status_pkt.api_status.api_completion_fence_value =
|
||||
++ring->fence_drv.sync_seq;
|
||||
|
||||
amdgpu_ring_write_multiple(ring, &mes_status_pkt,
|
||||
sizeof(mes_status_pkt) / 4);
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
spin_unlock_irqrestore(&mes->ring_lock, flags);
|
||||
|
||||
@ -206,15 +216,16 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
|
||||
|
||||
if (misc_op_str)
|
||||
dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str, misc_op_str);
|
||||
dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
|
||||
misc_op_str);
|
||||
else if (op_str)
|
||||
dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
|
||||
else
|
||||
dev_dbg(adev->dev, "MES msg=%d was emitted\n", x_pkt->header.opcode);
|
||||
dev_dbg(adev->dev, "MES msg=%d was emitted\n",
|
||||
x_pkt->header.opcode);
|
||||
|
||||
r = amdgpu_mes_fence_wait_polling(fence_ptr, (u64)1, timeout);
|
||||
amdgpu_device_wb_free(adev, fence_offset);
|
||||
if (r < 1) {
|
||||
r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
|
||||
if (r < 1 || !*status_ptr) {
|
||||
|
||||
if (misc_op_str)
|
||||
dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
|
||||
@ -229,10 +240,19 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
|
||||
while (halt_if_hws_hang)
|
||||
schedule();
|
||||
|
||||
return -ETIMEDOUT;
|
||||
r = -ETIMEDOUT;
|
||||
goto error_wb_free;
|
||||
}
|
||||
|
||||
amdgpu_device_wb_free(adev, status_offset);
|
||||
return 0;
|
||||
|
||||
error_unlock_free:
|
||||
spin_unlock_irqrestore(&mes->ring_lock, flags);
|
||||
|
||||
error_wb_free:
|
||||
amdgpu_device_wb_free(adev, status_offset);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int convert_to_mes_queue_type(int queue_type)
|
||||
|
@ -32,7 +32,9 @@
|
||||
#include "mp/mp_14_0_2_sh_mask.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
|
||||
|
||||
/* For large FW files the time to complete can be very long */
|
||||
#define USBC_PD_POLLING_LIMIT_S 240
|
||||
@ -64,6 +66,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
|
||||
case IP_VERSION(14, 0, 2):
|
||||
case IP_VERSION(14, 0, 3):
|
||||
err = psp_init_sos_microcode(psp, ucode_prefix);
|
||||
if (err)
|
||||
return err;
|
||||
err = psp_init_ta_microcode(psp, ucode_prefix);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
|
@ -8,7 +8,7 @@ config DRM_AMD_DC
|
||||
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
|
||||
select SND_HDA_COMPONENT if SND_HDA_CORE
|
||||
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
|
||||
select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
|
||||
select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
|
||||
help
|
||||
Choose this option if you want to use the new display engine
|
||||
support for AMDGPU. This adds required support for Vega and
|
||||
|
@ -9169,9 +9169,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
|
||||
trace_amdgpu_dm_atomic_commit_tail_begin(state);
|
||||
|
||||
if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
|
||||
dc_allow_idle_optimizations(dm->dc, false);
|
||||
|
||||
drm_atomic_helper_update_legacy_modeset_state(dev, state);
|
||||
drm_dp_mst_atomic_wait_for_dependencies(state);
|
||||
|
||||
@ -11440,6 +11437,12 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
|
||||
mutex_unlock(&adev->dm.dc_lock);
|
||||
}
|
||||
|
||||
static inline void amdgpu_dm_exit_ips_for_hw_access(struct dc *dc)
|
||||
{
|
||||
if (dc->ctx->dmub_srv && !dc->ctx->dmub_srv->idle_exit_counter)
|
||||
dc_exit_ips_for_hw_access(dc);
|
||||
}
|
||||
|
||||
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
u32 value, const char *func_name)
|
||||
{
|
||||
@ -11450,6 +11453,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
|
||||
cgs_write_register(ctx->cgs_device, address, value);
|
||||
trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
|
||||
}
|
||||
@ -11473,6 +11478,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
|
||||
return 0;
|
||||
}
|
||||
|
||||
amdgpu_dm_exit_ips_for_hw_access(ctx->dc);
|
||||
|
||||
value = cgs_read_register(ctx->cgs_device, address);
|
||||
|
||||
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
|
||||
|
@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
|
||||
.urgent_latency_pixel_data_only_us = 4.0,
|
||||
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
|
||||
.urgent_latency_vm_data_only_us = 4.0,
|
||||
.dram_clock_change_latency_us = 11.72,
|
||||
.dram_clock_change_latency_us = 34.0,
|
||||
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
|
||||
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
|
||||
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
|
||||
|
@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
|
||||
.urgent_latency_pixel_data_only_us = 4.0,
|
||||
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
|
||||
.urgent_latency_vm_data_only_us = 4.0,
|
||||
.dram_clock_change_latency_us = 11.72,
|
||||
.dram_clock_change_latency_us = 34,
|
||||
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
|
||||
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
|
||||
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
|
||||
|
@ -1439,3 +1439,75 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
/* Calculate average pixel count per TU, return false if under ~2.00 to
|
||||
* avoid empty TUs. This is only required for DPIA tunneling as empty TUs
|
||||
* are legal to generate for native DP links. Assume TU size 64 as there
|
||||
* is currently no scenario where it's reprogrammed from HW default.
|
||||
* MTPs have no such limitation, so this does not affect MST use cases.
|
||||
*/
|
||||
unsigned int pix_clk_mhz;
|
||||
unsigned int symclk_mhz;
|
||||
unsigned int avg_pix_per_tu_x1000;
|
||||
unsigned int tu_size_bytes = 64;
|
||||
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
|
||||
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
|
||||
const struct dc *dc = pipe_ctx->stream->link->dc;
|
||||
|
||||
if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
|
||||
return false;
|
||||
|
||||
// Not necessary for MST configurations
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
|
||||
return false;
|
||||
|
||||
pix_clk_mhz = timing->pix_clk_100hz / 10000;
|
||||
|
||||
// If this is true, can't block due to dynamic ODM
|
||||
if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
|
||||
return false;
|
||||
|
||||
switch (link_settings->link_rate) {
|
||||
case LINK_RATE_LOW:
|
||||
symclk_mhz = 162;
|
||||
break;
|
||||
case LINK_RATE_HIGH:
|
||||
symclk_mhz = 270;
|
||||
break;
|
||||
case LINK_RATE_HIGH2:
|
||||
symclk_mhz = 540;
|
||||
break;
|
||||
case LINK_RATE_HIGH3:
|
||||
symclk_mhz = 810;
|
||||
break;
|
||||
default:
|
||||
// We shouldn't be tunneling any other rates, something is wrong
|
||||
ASSERT(0);
|
||||
return false;
|
||||
}
|
||||
|
||||
avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
|
||||
/ (symclk_mhz * link_settings->lane_count);
|
||||
|
||||
// Add small empirically-decided margin to account for potential jitter
|
||||
return (avg_pix_per_tu_x1000 < 2020);
|
||||
}
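To make the cutoff concrete (illustrative numbers, not taken from the patch): a 25 MHz pixel clock tunnelled over HBR (symclk 270 MHz) on 4 lanes gives avg_pix_per_tu_x1000 = 1000 * 25 * 64 / (270 * 4) = 1481, under the 2020 threshold, so should_avoid_empty_tu() returns true and the divider policy below is not applied; a 148 MHz pixel clock on the same link yields about 8770 and is unaffected.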
|
||||
|
||||
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
struct dc *dc = pipe_ctx->stream->ctx->dc;
|
||||
|
||||
if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
|
||||
return false;
|
||||
|
||||
if (should_avoid_empty_tu(pipe_ctx))
|
||||
return false;
|
||||
|
||||
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
|
||||
dc->debug.enable_dp_dig_pixel_rate_div_policy)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -95,4 +95,6 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
|
||||
void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
|
||||
int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
|
||||
|
||||
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
|
||||
|
||||
#endif /* __DC_HWSS_DCN35_H__ */
|
||||
|
@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
|
||||
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
|
||||
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
|
||||
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
|
||||
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
|
||||
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
|
||||
.dsc_pg_control = dcn35_dsc_pg_control,
|
||||
.dsc_pg_status = dcn32_dsc_pg_status,
|
||||
.enable_plane = dcn35_enable_plane,
|
||||
|
@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
|
||||
|
||||
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
|
||||
if (table[i].ulSupportedSCLK != 0) {
|
||||
if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
|
||||
continue;
|
||||
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
|
||||
table[i].usVoltageID;
|
||||
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
|
||||
|
@ -442,6 +442,10 @@ bool intel_dp_has_bigjoiner(struct intel_dp *intel_dp)
|
||||
struct intel_encoder *encoder = &intel_dig_port->base;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
||||
/* eDP MSO is not compatible with joiner */
|
||||
if (intel_dp->mso_link_count)
|
||||
return false;
|
||||
|
||||
return DISPLAY_VER(dev_priv) >= 12 ||
|
||||
(DISPLAY_VER(dev_priv) == 11 &&
|
||||
encoder->port != PORT_A);
|
||||
|
@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
|
||||
|
||||
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
|
||||
if (table[i].ulSupportedSCLK != 0) {
|
||||
if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
|
||||
continue;
|
||||
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
|
||||
table[i].usVoltageID;
|
||||
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
|
||||
|
@ -2,7 +2,7 @@
|
||||
config DRM_VMWGFX
|
||||
tristate "DRM driver for VMware Virtual GPU"
|
||||
depends on DRM && PCI && MMU
|
||||
depends on X86 || ARM64
|
||||
depends on (X86 && HYPERVISOR_GUEST) || ARM64
|
||||
select DRM_TTM
|
||||
select DRM_TTM_HELPER
|
||||
select MAPPING_DIRTY_HELPERS
|
||||
|
@ -631,8 +631,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
|
||||
struct xe_device *xe = guc_to_xe(guc);
|
||||
int err;
|
||||
|
||||
guc_enable_irq(guc);
|
||||
|
||||
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
|
||||
struct xe_gt *gt = guc_to_gt(guc);
|
||||
struct xe_tile *tile = gt_to_tile(gt);
|
||||
@ -640,6 +638,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
|
||||
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
guc_enable_irq(guc);
|
||||
}
|
||||
|
||||
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
|
||||
|
@ -45,8 +45,8 @@ int hv_init(void)
|
||||
* This involves a hypercall.
|
||||
*/
|
||||
int hv_post_message(union hv_connection_id connection_id,
|
||||
enum hv_message_type message_type,
|
||||
void *payload, size_t payload_size)
|
||||
enum hv_message_type message_type,
|
||||
void *payload, size_t payload_size)
|
||||
{
|
||||
struct hv_input_post_message *aligned_msg;
|
||||
unsigned long flags;
|
||||
@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
|
||||
status = HV_STATUS_INVALID_PARAMETER;
|
||||
} else {
|
||||
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
|
||||
aligned_msg, NULL);
|
||||
aligned_msg, NULL);
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
@ -111,7 +111,7 @@ int hv_synic_alloc(void)
|
||||
|
||||
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
|
||||
GFP_KERNEL);
|
||||
if (hv_context.hv_numa_map == NULL) {
|
||||
if (!hv_context.hv_numa_map) {
|
||||
pr_err("Unable to allocate NUMA map\n");
|
||||
goto err;
|
||||
}
|
||||
@ -120,11 +120,11 @@ int hv_synic_alloc(void)
|
||||
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
|
||||
tasklet_init(&hv_cpu->msg_dpc,
|
||||
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
|
||||
vmbus_on_msg_dpc, (unsigned long)hv_cpu);
|
||||
|
||||
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
|
||||
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->post_msg_page == NULL) {
|
||||
if (!hv_cpu->post_msg_page) {
|
||||
pr_err("Unable to allocate post msg page\n");
|
||||
goto err;
|
||||
}
|
||||
@ -147,14 +147,14 @@ int hv_synic_alloc(void)
|
||||
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
|
||||
hv_cpu->synic_message_page =
|
||||
(void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->synic_message_page == NULL) {
|
||||
if (!hv_cpu->synic_message_page) {
|
||||
pr_err("Unable to allocate SYNIC message page\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
hv_cpu->synic_event_page =
|
||||
(void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->synic_event_page == NULL) {
|
||||
if (!hv_cpu->synic_event_page) {
|
||||
pr_err("Unable to allocate SYNIC event page\n");
|
||||
|
||||
free_page((unsigned long)hv_cpu->synic_message_page);
|
||||
@ -203,14 +203,13 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
void hv_synic_free(void)
|
||||
{
|
||||
int cpu, ret;
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
|
||||
/* It's better to leak the page if the encryption fails. */
|
||||
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
|
||||
@ -262,8 +261,8 @@ void hv_synic_free(void)
|
||||
*/
|
||||
void hv_synic_enable_regs(unsigned int cpu)
|
||||
{
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
union hv_synic_simp simp;
|
||||
union hv_synic_siefp siefp;
|
||||
union hv_synic_sint shared_sint;
|
||||
@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
|
||||
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
|
||||
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
|
||||
~ms_hyperv.shared_gpa_boundary;
|
||||
hv_cpu->synic_message_page
|
||||
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
hv_cpu->synic_message_page =
|
||||
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
if (!hv_cpu->synic_message_page)
|
||||
pr_err("Fail to map synic message page.\n");
|
||||
} else {
|
||||
@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
|
||||
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
|
||||
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
|
||||
~ms_hyperv.shared_gpa_boundary;
|
||||
hv_cpu->synic_event_page
|
||||
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
hv_cpu->synic_event_page =
|
||||
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
if (!hv_cpu->synic_event_page)
|
||||
pr_err("Fail to map synic event page.\n");
|
||||
} else {
|
||||
@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
|
||||
*/
|
||||
void hv_synic_disable_regs(unsigned int cpu)
|
||||
{
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
union hv_synic_sint shared_sint;
|
||||
union hv_synic_simp simp;
|
||||
union hv_synic_siefp siefp;
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/percpu_counter.h>
|
||||
#include <linux/page_reporting.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#include <linux/hyperv.h>
|
||||
#include <asm/hyperv-tlfs.h>
|
||||
@ -41,8 +42,6 @@
|
||||
* Begin protocol definitions.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Protocol versions. The low word is the minor version, the high word the major
|
||||
* version.
|
||||
@ -71,8 +70,6 @@ enum {
|
||||
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
|
||||
};
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Message Types
|
||||
*/
|
||||
@ -101,7 +98,6 @@ enum dm_message_type {
|
||||
DM_VERSION_1_MAX = 12
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Structures defining the dynamic memory management
|
||||
* protocol.
|
||||
@ -115,7 +111,6 @@ union dm_version {
|
||||
__u32 version;
|
||||
} __packed;
|
||||
|
||||
|
||||
union dm_caps {
|
||||
struct {
|
||||
__u64 balloon:1;
|
||||
@ -148,8 +143,6 @@ union dm_mem_page_range {
|
||||
__u64 page_range;
|
||||
} __packed;
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The header for all dynamic memory messages:
|
||||
*
|
||||
@ -174,7 +167,6 @@ struct dm_message {
|
||||
__u8 data[]; /* enclosed message */
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Specific message types supporting the dynamic memory protocol.
|
||||
*/
|
||||
@ -271,7 +263,6 @@ struct dm_status {
|
||||
__u32 io_diff;
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Message to ask the guest to allocate memory - balloon up message.
|
||||
* This message is sent from the host to the guest. The guest may not be
|
||||
@ -286,14 +277,13 @@ struct dm_balloon {
|
||||
__u32 reservedz;
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Balloon response message; this message is sent from the guest
|
||||
* to the host in response to the balloon message.
|
||||
*
|
||||
* reservedz: Reserved; must be set to zero.
|
||||
* more_pages: If FALSE, this is the last message of the transaction.
|
||||
* if TRUE there will atleast one more message from the guest.
|
||||
* if TRUE there will be at least one more message from the guest.
|
||||
*
|
||||
* range_count: The number of ranges in the range array.
|
||||
*
|
||||
@ -314,7 +304,7 @@ struct dm_balloon_response {
* to the guest to give guest more memory.
*
* more_pages: If FALSE, this is the last message of the transaction.
* if TRUE there will atleast one more message from the guest.
* if TRUE there will be at least one more message from the guest.
*
* reservedz: Reserved; must be set to zero.
*
@ -342,7 +332,6 @@ struct dm_unballoon_response {
struct dm_header hdr;
} __packed;
/*
* Hot add request message. Message sent from the host to the guest.
*
@ -390,7 +379,6 @@ enum dm_info_type {
MAX_INFO_TYPE
};
/*
* Header for the information message.
*/
@ -425,11 +413,11 @@ struct dm_info_msg {
* The range start_pfn : end_pfn specifies the range
* that the host has asked us to hot add. The range
* start_pfn : ha_end_pfn specifies the range that we have
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
* currently hot added. We hot add in chunks equal to the
* memory block size; it is possible that we may not be able
* to bring online all the pages in the region. The range
* covered_start_pfn:covered_end_pfn defines the pages that can
* be brough online.
* be brought online.
*/
struct hv_hotadd_state {
@ -480,10 +468,10 @@ static unsigned long last_post_time;
static int hv_hypercall_multi_failure;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
module_param(hot_add, bool, 0644);
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
module_param(pressure_report_delay, uint, 0644);
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
@ -502,11 +490,13 @@ enum hv_dm_state {
DM_INIT_ERROR
};
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
static unsigned long ha_pages_in_chunk;
#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
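/*
 * Illustrative note, not part of this patch: the fixed HA_CHUNK removed above
 * is 128 MiB, i.e. 32768 pages with 4 KiB pages. The patch replaces it with
 * the runtime ha_pages_in_chunk derived from memory_block_size_bytes(), so
 * hot-add chunks always match the platform's memory block size.
 */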
struct hv_dynmem_device {
|
||||
struct hv_device *dev;
|
||||
@ -595,12 +585,12 @@ static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
|
||||
struct hv_hotadd_gap *gap;
|
||||
|
||||
/* The page is not backed. */
|
||||
if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
|
||||
if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
|
||||
return false;
|
||||
|
||||
/* Check for gaps. */
|
||||
list_for_each_entry(gap, &has->gap_list, list) {
|
||||
if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
|
||||
if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -724,28 +714,21 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
|
||||
unsigned long processed_pfn;
|
||||
unsigned long total_pfn = pfn_count;
|
||||
|
||||
for (i = 0; i < (size/HA_CHUNK); i++) {
|
||||
start_pfn = start + (i * HA_CHUNK);
|
||||
for (i = 0; i < (size/ha_pages_in_chunk); i++) {
|
||||
start_pfn = start + (i * ha_pages_in_chunk);
|
||||
|
||||
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
|
||||
has->ha_end_pfn += HA_CHUNK;
|
||||
|
||||
if (total_pfn > HA_CHUNK) {
|
||||
processed_pfn = HA_CHUNK;
|
||||
total_pfn -= HA_CHUNK;
|
||||
} else {
|
||||
processed_pfn = total_pfn;
|
||||
total_pfn = 0;
|
||||
}
|
||||
|
||||
has->covered_end_pfn += processed_pfn;
|
||||
has->ha_end_pfn += ha_pages_in_chunk;
|
||||
processed_pfn = umin(total_pfn, ha_pages_in_chunk);
|
||||
total_pfn -= processed_pfn;
|
||||
has->covered_end_pfn += processed_pfn;
|
||||
}
|
||||
|
||||
reinit_completion(&dm_device.ol_waitevent);
|
||||
|
||||
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
|
||||
ret = add_memory(nid, PFN_PHYS((start_pfn)),
|
||||
(HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
|
||||
HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
|
||||
|
||||
if (ret) {
|
||||
pr_err("hot_add memory failed error is %d\n", ret);
|
||||
@ -760,7 +743,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
|
||||
do_hot_add = false;
|
||||
}
|
||||
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
|
||||
has->ha_end_pfn -= HA_CHUNK;
|
||||
has->ha_end_pfn -= ha_pages_in_chunk;
|
||||
has->covered_end_pfn -= processed_pfn;
|
||||
}
|
||||
break;
|
||||
@ -787,8 +770,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
|
||||
guard(spinlock_irqsave)(&dm_device.ha_lock);
|
||||
list_for_each_entry(has, &dm_device.ha_region_list, list) {
|
||||
/* The page belongs to a different HAS. */
|
||||
if ((pfn < has->start_pfn) ||
|
||||
(pfn + (1UL << order) > has->end_pfn))
|
||||
if (pfn < has->start_pfn ||
|
||||
(pfn + (1UL << order) > has->end_pfn))
|
||||
continue;
|
||||
|
||||
hv_bring_pgs_online(has, pfn, 1UL << order);
|
||||
@ -800,7 +783,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct hv_hotadd_state *has;
struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
unsigned long residual;
int ret = 0;
guard(spinlock_irqsave)(&dm_device.ha_lock);
@ -836,15 +819,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* our current limit; extend it.
*/
if ((start_pfn + pfn_cnt) > has->end_pfn) {
/* Extend the region by multiples of ha_pages_in_chunk */
residual = (start_pfn + pfn_cnt - has->end_pfn);
/*
* Extend the region by multiples of HA_CHUNK.
*/
new_inc = (residual / HA_CHUNK) * HA_CHUNK;
if (residual % HA_CHUNK)
new_inc += HA_CHUNK;
has->end_pfn += new_inc;
has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
}
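/*
 * Illustrative note, not part of this patch: ALIGN(residual, ha_pages_in_chunk)
 * rounds residual up to the next multiple of the chunk size (the chunk size is
 * a power of 2), so it is equivalent to the open-coded form it replaces:
 *
 *	new_inc = (residual / HA_CHUNK) * HA_CHUNK;
 *	if (residual % HA_CHUNK)
 *		new_inc += HA_CHUNK;
 *
 * For example, residual = 40000 with a 32768-page chunk rounds up to 65536.
 */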
|
||||
|
||||
ret = 1;
|
||||
@ -855,7 +832,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
|
||||
}
|
||||
|
||||
static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
unsigned long pg_count)
|
||||
unsigned long pg_count)
|
||||
{
|
||||
unsigned long start_pfn = pg_start;
|
||||
unsigned long pfn_cnt = pg_count;
|
||||
@ -866,7 +843,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
unsigned long res = 0, flags;
|
||||
|
||||
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
|
||||
pg_start);
|
||||
pg_start);
|
||||
|
||||
spin_lock_irqsave(&dm_device.ha_lock, flags);
|
||||
list_for_each_entry(has, &dm_device.ha_region_list, list) {
|
||||
@ -902,22 +879,19 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
if (start_pfn > has->start_pfn &&
|
||||
online_section_nr(pfn_to_section_nr(start_pfn)))
|
||||
hv_bring_pgs_online(has, start_pfn, pgs_ol);
|
||||
|
||||
}
|
||||
|
||||
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
|
||||
if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
|
||||
/*
|
||||
* We have some residual hot add range
|
||||
* that needs to be hot added; hot add
|
||||
* it now. Hot add a multiple of
|
||||
* HA_CHUNK that fully covers the pages
|
||||
* ha_pages_in_chunk that fully covers the pages
|
||||
* we have.
|
||||
*/
|
||||
size = (has->end_pfn - has->ha_end_pfn);
|
||||
if (pfn_cnt <= size) {
|
||||
size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
|
||||
if (pfn_cnt % HA_CHUNK)
|
||||
size += HA_CHUNK;
|
||||
size = ALIGN(pfn_cnt, ha_pages_in_chunk);
|
||||
} else {
|
||||
pfn_cnt = size;
|
||||
}
|
||||
@ -1010,10 +984,7 @@ static void hot_add_req(struct work_struct *dummy)
|
||||
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
|
||||
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
|
||||
|
||||
if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
|
||||
unsigned long region_size;
|
||||
unsigned long region_start;
|
||||
|
||||
if (rg_start == 0 && !dm->host_specified_ha_region) {
|
||||
/*
|
||||
* The host has not specified the hot-add region.
|
||||
* Based on the hot-add page range being specified,
|
||||
@ -1021,19 +992,13 @@ static void hot_add_req(struct work_struct *dummy)
|
||||
* that need to be hot-added while ensuring the alignment
|
||||
* and size requirements of Linux as it relates to hot-add.
|
||||
*/
|
||||
region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
|
||||
if (pfn_cnt % HA_CHUNK)
|
||||
region_size += HA_CHUNK;
|
||||
|
||||
region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
|
||||
|
||||
rg_start = region_start;
|
||||
rg_sz = region_size;
|
||||
rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
|
||||
rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
|
||||
}
|
||||
|
||||
if (do_hot_add)
|
||||
resp.page_count = process_hot_add(pg_start, pfn_cnt,
|
||||
rg_start, rg_sz);
|
||||
rg_start, rg_sz);
|
||||
|
||||
dm->num_pages_added += resp.page_count;
|
||||
#endif
|
||||
@ -1211,11 +1176,10 @@ static void post_status(struct hv_dynmem_device *dm)
|
||||
sizeof(struct dm_status),
|
||||
(unsigned long)NULL,
|
||||
VM_PKT_DATA_INBAND, 0);
|
||||
|
||||
}
|
||||
|
||||
static void free_balloon_pages(struct hv_dynmem_device *dm,
|
||||
union dm_mem_page_range *range_array)
|
||||
union dm_mem_page_range *range_array)
|
||||
{
|
||||
int num_pages = range_array->finfo.page_cnt;
|
||||
__u64 start_frame = range_array->finfo.start_page;
|
||||
@ -1231,8 +1195,6 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
|
||||
}
|
||||
}
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
|
||||
unsigned int num_pages,
|
||||
struct dm_balloon_response *bl_resp,
|
||||
@ -1278,7 +1240,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
|
||||
page_to_pfn(pg);
|
||||
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
|
||||
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
|
||||
|
||||
}
|
||||
|
||||
return i * alloc_unit;
|
||||
@ -1332,7 +1293,7 @@ static void balloon_up(struct work_struct *dummy)
|
||||
|
||||
if (num_ballooned == 0 || num_ballooned == num_pages) {
|
||||
pr_debug("Ballooned %u out of %u requested pages.\n",
|
||||
num_pages, dm_device.balloon_wrk.num_pages);
|
||||
num_pages, dm_device.balloon_wrk.num_pages);
|
||||
|
||||
bl_resp->more_pages = 0;
|
||||
done = true;
|
||||
@ -1366,16 +1327,15 @@ static void balloon_up(struct work_struct *dummy)
|
||||
|
||||
for (i = 0; i < bl_resp->range_count; i++)
|
||||
free_balloon_pages(&dm_device,
|
||||
&bl_resp->range_array[i]);
|
||||
&bl_resp->range_array[i]);
|
||||
|
||||
done = true;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void balloon_down(struct hv_dynmem_device *dm,
|
||||
struct dm_unballoon_request *req)
|
||||
struct dm_unballoon_request *req)
|
||||
{
|
||||
union dm_mem_page_range *range_array = req->range_array;
|
||||
int range_count = req->range_count;
|
||||
@ -1389,7 +1349,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
|
||||
}
|
||||
|
||||
pr_debug("Freed %u ballooned pages.\n",
|
||||
prev_pages_ballooned - dm->num_pages_ballooned);
|
||||
prev_pages_ballooned - dm->num_pages_ballooned);
|
||||
|
||||
if (req->more_pages == 1)
|
||||
return;
|
||||
@ -1414,8 +1374,7 @@ static int dm_thread_func(void *dm_dev)
|
||||
struct hv_dynmem_device *dm = dm_dev;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
wait_for_completion_interruptible_timeout(
|
||||
&dm_device.config_event, 1*HZ);
|
||||
wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
|
||||
/*
|
||||
* The host expects us to post information on the memory
|
||||
* pressure every second.
|
||||
@ -1439,9 +1398,8 @@ static int dm_thread_func(void *dm_dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void version_resp(struct hv_dynmem_device *dm,
|
||||
struct dm_version_response *vresp)
|
||||
struct dm_version_response *vresp)
|
||||
{
|
||||
struct dm_version_request version_req;
|
||||
int ret;
|
||||
@ -1502,7 +1460,7 @@ version_error:
|
||||
}
|
||||
|
||||
static void cap_resp(struct hv_dynmem_device *dm,
|
||||
struct dm_capabilities_resp_msg *cap_resp)
|
||||
struct dm_capabilities_resp_msg *cap_resp)
|
||||
{
|
||||
if (!cap_resp->is_accepted) {
|
||||
pr_err("Capabilities not accepted by host\n");
|
||||
@ -1535,7 +1493,7 @@ static void balloon_onchannelcallback(void *context)
|
||||
switch (dm_hdr->type) {
|
||||
case DM_VERSION_RESPONSE:
|
||||
version_resp(dm,
|
||||
(struct dm_version_response *)dm_msg);
|
||||
(struct dm_version_response *)dm_msg);
|
||||
break;
|
||||
|
||||
case DM_CAPABILITIES_RESPONSE:
|
||||
@ -1565,7 +1523,7 @@ static void balloon_onchannelcallback(void *context)
|
||||
|
||||
dm->state = DM_BALLOON_DOWN;
|
||||
balloon_down(dm,
|
||||
(struct dm_unballoon_request *)recv_buffer);
|
||||
(struct dm_unballoon_request *)recv_buffer);
|
||||
break;
|
||||
|
||||
case DM_MEM_HOT_ADD_REQUEST:
|
||||
@ -1603,17 +1561,15 @@ static void balloon_onchannelcallback(void *context)
|
||||
|
||||
default:
|
||||
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#define HV_LARGE_REPORTING_ORDER 9
|
||||
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
|
||||
HV_LARGE_REPORTING_ORDER)
|
||||
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
struct scatterlist *sgl, unsigned int nents)
|
||||
struct scatterlist *sgl, unsigned int nents)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct hv_memory_hint *hint;
|
||||
@ -1648,7 +1604,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
*/
|
||||
|
||||
/* page reporting for pages 2MB or higher */
|
||||
if (order >= HV_LARGE_REPORTING_ORDER ) {
|
||||
if (order >= HV_LARGE_REPORTING_ORDER) {
|
||||
range->page.largepage = 1;
|
||||
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
|
||||
range->base_large_pfn = page_to_hvpfn(
|
||||
@ -1662,23 +1618,21 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
range->page.additional_pages =
|
||||
(sg->length / HV_HYP_PAGE_SIZE) - 1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
|
||||
hint, NULL);
|
||||
local_irq_restore(flags);
|
||||
if (!hv_result_success(status)) {
|
||||
|
||||
pr_err("Cold memory discard hypercall failed with status %llx\n",
|
||||
status);
|
||||
status);
|
||||
if (hv_hypercall_multi_failure > 0)
|
||||
hv_hypercall_multi_failure++;
|
||||
|
||||
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
|
||||
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
|
||||
pr_err("Defaulting to page_reporting_order %d\n",
|
||||
pageblock_order);
|
||||
pageblock_order);
|
||||
page_reporting_order = pageblock_order;
|
||||
hv_hypercall_multi_failure++;
|
||||
return -EINVAL;
|
||||
@ -1712,7 +1666,7 @@ static void enable_page_reporting(void)
|
||||
pr_err("Failed to enable cold memory discard: %d\n", ret);
|
||||
} else {
|
||||
pr_info("Cold memory discard hint enabled with order %d\n",
|
||||
page_reporting_order);
|
||||
page_reporting_order);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1795,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
|
||||
if (t == 0) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
@ -1831,10 +1785,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
/*
* Specify our alignment requirements as it relates
* memory hot-add. Specify 128MB alignment.
* Specify our alignment requirements for memory hot-add. The value is
* the log base 2 of the number of megabytes in a chunk. For example,
* with 256 MiB chunks, the value is 8. The number of MiB in a chunk
* must be a power of 2.
*/
cap_msg.caps.cap_bits.hot_add_alignment = 7;
cap_msg.caps.cap_bits.hot_add_alignment =
ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
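/*
 * Illustrative note, not part of this patch: with a 128 MiB memory block size
 * (a common x86 default), HA_BYTES_IN_CHUNK / SZ_1M is 128 and ilog2(128) = 7,
 * matching the value that was previously hard-coded here; a 256 MiB block size
 * would give ilog2(256) = 8, as in the example in the comment above.
 */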
|
||||
|
||||
/*
|
||||
* Currently the host does not use these
|
||||
@ -1850,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
|
||||
if (t == 0) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
@ -1891,8 +1848,8 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
|
||||
char *sname;
|
||||
|
||||
seq_printf(f, "%-22s: %u.%u\n", "host_version",
|
||||
DYNMEM_MAJOR_VERSION(dm->version),
|
||||
DYNMEM_MINOR_VERSION(dm->version));
|
||||
DYNMEM_MAJOR_VERSION(dm->version),
|
||||
DYNMEM_MINOR_VERSION(dm->version));
|
||||
|
||||
seq_printf(f, "%-22s:", "capabilities");
|
||||
if (ballooning_enabled())
|
||||
@ -1941,10 +1898,10 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
|
||||
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
|
||||
|
||||
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
|
||||
get_pages_committed(dm));
|
||||
get_pages_committed(dm));
|
||||
|
||||
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
|
||||
dm->max_dynamic_page_count);
|
||||
dm->max_dynamic_page_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1954,7 +1911,7 @@ DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
|
||||
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
|
||||
{
|
||||
debugfs_create_file("hv-balloon", 0444, NULL, b,
|
||||
&hv_balloon_debug_fops);
|
||||
&hv_balloon_debug_fops);
|
||||
}
|
||||
|
||||
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
|
||||
@ -1984,8 +1941,23 @@ static int balloon_probe(struct hv_device *dev,
|
||||
hot_add = false;
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Hot-add must operate in chunks that are of size equal to the
* memory block size because that's what the core add_memory()
* interface requires. The Hyper-V interface requires that the memory
* block size be a power of 2, which is guaranteed by the check in
* memory_dev_init().
*/
ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
do_hot_add = hot_add;
#else
/*
* Without MEMORY_HOTPLUG, the guest returns a failure status for all
* hot add requests from Hyper-V, and the chunk size is used only to
* specify alignment to Hyper-V as required by the host/guest protocol.
* Somewhat arbitrarily, use 128 MiB.
*/
ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
do_hot_add = false;
#endif
dm_device.dev = dev;
|
||||
@ -2097,7 +2069,6 @@ static int balloon_suspend(struct hv_device *hv_dev)
|
||||
tasklet_enable(&hv_dev->channel->callback_event);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static int balloon_resume(struct hv_device *dev)
|
||||
@ -2156,7 +2127,6 @@ static struct hv_driver balloon_drv = {
|
||||
|
||||
static int __init init_balloon_drv(void)
|
||||
{
|
||||
|
||||
return vmbus_driver_register(&balloon_drv);
|
||||
}
|
||||
|
||||
|
@ -431,8 +431,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
|
||||
oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
|
||||
|
||||
/* Init the device */
|
||||
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
|
||||
oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
|
||||
oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
|
||||
struct bnxt_re_sqp_entries *sqp_tbl;
|
||||
};
|
||||
|
||||
#define BNXT_RE_MIN_MSIX 2
|
||||
#define BNXT_RE_MAX_MSIX 9
|
||||
#define BNXT_RE_AEQ_IDX 0
|
||||
#define BNXT_RE_NQ_IDX 1
|
||||
#define BNXT_RE_GEN_P5_MAX_VF 64
|
||||
@ -168,7 +166,7 @@ struct bnxt_re_dev {
|
||||
struct bnxt_qplib_rcfw rcfw;
|
||||
|
||||
/* NQ */
|
||||
struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
|
||||
struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
|
||||
|
||||
/* Device Resources */
|
||||
struct bnxt_qplib_dev_attr dev_attr;
|
||||
|
@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
|
||||
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
|
||||
start, iova, length, access_flags);
|
||||
|
||||
access_flags &= ~IB_ACCESS_OPTIONAL;
|
||||
if (access_flags & ~VALID_MR_FLAGS)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
|
||||
spin_lock_init(&dev->dm.lock);
|
||||
dev->dm.dev = mdev;
|
||||
return 0;
|
||||
err:
|
||||
mlx5r_macsec_dealloc_gids(dev);
|
||||
err_mp:
|
||||
mlx5_ib_cleanup_multiport_master(dev);
|
||||
err:
|
||||
mlx5r_macsec_dealloc_gids(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
|
||||
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
|
||||
MLX5_SET(mkc, mkc, access_mode_4_2,
|
||||
(ent->rb_key.access_mode >> 2) & 0x7);
|
||||
MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
|
||||
|
||||
MLX5_SET(mkc, mkc, translations_octword_size,
|
||||
get_mkc_octo_size(ent->rb_key.access_mode,
|
||||
@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
|
||||
new = &((*new)->rb_left);
|
||||
if (cmp < 0)
|
||||
new = &((*new)->rb_right);
|
||||
if (cmp == 0) {
|
||||
mutex_unlock(&cache->rb_lock);
|
||||
if (cmp == 0)
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
|
||||
/* Add new node and rebalance tree. */
|
||||
@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
|
||||
}
|
||||
mr->mmkey.cache_ent = ent;
|
||||
mr->mmkey.type = MLX5_MKEY_MR;
|
||||
mr->mmkey.rb_key = ent->rb_key;
|
||||
mr->mmkey.cacheable = true;
|
||||
init_waitqueue_head(&mr->mmkey.wait);
|
||||
return mr;
|
||||
}
|
||||
@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
|
||||
mr->ibmr.pd = pd;
|
||||
mr->umem = umem;
|
||||
mr->page_shift = order_base_2(page_size);
|
||||
mr->mmkey.cacheable = true;
|
||||
set_mr_fields(dev, mr, umem->length, access_flags, iova);
|
||||
|
||||
return mr;
|
||||
|
@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
|
||||
int err;
|
||||
struct mlx5_srq_attr in = {};
|
||||
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
|
||||
__u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
|
||||
sizeof(struct mlx5_wqe_data_seg);
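/*
 * Illustrative note, not part of this patch: each scatter entry of a receive
 * WQE occupies one struct mlx5_wqe_data_seg, so dividing the device's maximum
 * RQ WQE size by that segment size bounds how many SGEs a single SRQ WQE can
 * carry, which is what the new max_sge check below enforces.
 */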
|
||||
|
||||
if (init_attr->srq_type != IB_SRQT_BASIC &&
|
||||
init_attr->srq_type != IB_SRQT_XRC &&
|
||||
init_attr->srq_type != IB_SRQT_TM)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Sanity check SRQ size before proceeding */
|
||||
if (init_attr->attr.max_wr >= max_srq_wqes) {
|
||||
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
|
||||
init_attr->attr.max_wr,
|
||||
max_srq_wqes);
|
||||
/* Sanity check SRQ and sge size before proceeding */
|
||||
if (init_attr->attr.max_wr >= max_srq_wqes ||
|
||||
init_attr->attr.max_sge > max_sge_sz) {
|
||||
mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
|
||||
init_attr->attr.max_wr, max_srq_wqes,
|
||||
init_attr->attr.max_sge, max_sge_sz);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
|
||||
* receive buffer later. For rmda operations additional
|
||||
* length checks are performed in check_rkey.
|
||||
*/
|
||||
if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
|
||||
unsigned int payload = payload_size(pkt);
|
||||
unsigned int recv_buffer_len = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
|
||||
recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
|
||||
if (payload + 40 > recv_buffer_len) {
|
||||
rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
|
||||
return RESPST_ERR_LENGTH;
|
||||
}
|
||||
}
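/*
 * Illustrative note, not part of this patch: the 40 bytes added to the payload
 * account for the InfiniBand Global Routing Header (GRH) that UD receive
 * buffers must reserve space for, so the posted SGEs have to cover the GRH
 * plus the incoming payload.
 */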
|
||||
|
||||
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
|
||||
(qp_type(qp) == IB_QPT_UC))) {
|
||||
unsigned int mtu = qp->mtu;
|
||||
|
@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ibwr->num_sge; i++, sge++) {
|
||||
memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
|
||||
memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
|
||||
p += sge->length;
|
||||
}
|
||||
}
|
||||
|
@ -210,6 +210,7 @@ static const struct regmap_access_table axp313a_volatile_table = {
|
||||
|
||||
static const struct regmap_range axp717_writeable_ranges[] = {
|
||||
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
|
||||
regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
|
||||
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
|
||||
};
|
||||
|
||||
|
@ -732,9 +732,6 @@ tx_done:
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
tx_dma_error:
|
||||
if (BNXT_TX_PTP_IS_SET(lflags))
|
||||
atomic_inc(&bp->ptp_cfg->tx_avail);
|
||||
|
||||
last_frag = i;
|
||||
|
||||
/* start back at beginning and unmap skb */
|
||||
@ -756,6 +753,8 @@ tx_dma_error:
|
||||
tx_free:
|
||||
dev_kfree_skb_any(skb);
|
||||
tx_kick_pending:
|
||||
if (BNXT_TX_PTP_IS_SET(lflags))
|
||||
atomic_inc(&bp->ptp_cfg->tx_avail);
|
||||
if (txr->kick_pending)
|
||||
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
|
||||
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
|
||||
@ -8996,6 +8995,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
|
||||
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
|
||||
#endif
|
||||
}
|
||||
bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
|
||||
|
||||
hwrm_func_qcaps_exit:
|
||||
hwrm_req_drop(bp, req);
|
||||
@ -15363,6 +15363,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
dev->priv_flags |= IFF_UNICAST_FLT;
|
||||
|
||||
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
|
||||
if (bp->tso_max_segs)
|
||||
netif_set_tso_max_segs(dev, bp->tso_max_segs);
|
||||
|
||||
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
|
||||
NETDEV_XDP_ACT_RX_SG;
|
||||
|
@ -2318,6 +2318,7 @@ struct bnxt {
|
||||
u8 rss_hash_key_updated:1;
|
||||
|
||||
u16 max_mtu;
|
||||
u16 tso_max_segs;
|
||||
u8 max_tc;
|
||||
u8 max_lltc; /* lossless TCs */
|
||||
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
|
||||
|
@ -2,7 +2,7 @@
|
||||
*
|
||||
* Copyright (c) 2014-2016 Broadcom Corporation
|
||||
* Copyright (c) 2014-2018 Broadcom Limited
|
||||
* Copyright (c) 2018-2023 Broadcom Inc.
|
||||
* Copyright (c) 2018-2024 Broadcom Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -500,7 +500,11 @@ struct cmd_nums {
|
||||
#define HWRM_TFC_IF_TBL_GET 0x399UL
|
||||
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
|
||||
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
|
||||
#define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
|
||||
#define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
|
||||
#define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
|
||||
#define HWRM_SV 0x400UL
|
||||
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
|
||||
#define HWRM_DBG_READ_DIRECT 0xff10UL
|
||||
#define HWRM_DBG_READ_INDIRECT 0xff11UL
|
||||
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
|
||||
@ -609,8 +613,8 @@ struct hwrm_err_output {
|
||||
#define HWRM_VERSION_MAJOR 1
|
||||
#define HWRM_VERSION_MINOR 10
|
||||
#define HWRM_VERSION_UPDATE 3
|
||||
#define HWRM_VERSION_RSVD 39
|
||||
#define HWRM_VERSION_STR "1.10.3.39"
|
||||
#define HWRM_VERSION_RSVD 44
|
||||
#define HWRM_VERSION_STR "1.10.3.44"
|
||||
|
||||
/* hwrm_ver_get_input (size:192b/24B) */
|
||||
struct hwrm_ver_get_input {
|
||||
@ -664,6 +668,7 @@ struct hwrm_ver_get_output {
|
||||
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
|
||||
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
|
||||
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
|
||||
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
|
||||
u8 roce_fw_maj_8b;
|
||||
u8 roce_fw_min_8b;
|
||||
u8 roce_fw_bld_8b;
|
||||
@ -843,7 +848,9 @@ struct hwrm_async_event_cmpl {
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
|
||||
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
|
||||
@ -1326,13 +1333,13 @@ struct hwrm_async_event_cmpl_error_report_base {
|
||||
u8 timestamp_lo;
|
||||
__le16 timestamp_hi;
|
||||
__le32 event_data1;
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
|
||||
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
|
||||
@ -1814,6 +1821,9 @@ struct hwrm_func_qcaps_output {
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
|
||||
#define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
|
||||
__le16 tunnel_disable_flag;
|
||||
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
|
||||
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
|
||||
@ -1828,7 +1838,7 @@ struct hwrm_func_qcaps_output {
|
||||
#define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
|
||||
u8 device_serial_number[8];
|
||||
__le16 ctxs_per_partition;
|
||||
u8 unused_2[2];
|
||||
__le16 max_tso_segs;
|
||||
__le32 roce_vf_max_av;
|
||||
__le32 roce_vf_max_cq;
|
||||
__le32 roce_vf_max_mrw;
|
||||
@ -2449,6 +2459,7 @@ struct hwrm_func_drv_rgtr_input {
|
||||
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
|
||||
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
|
||||
#define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
|
||||
#define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
|
||||
__le32 enables;
|
||||
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
|
||||
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
|
||||
@ -3660,22 +3671,24 @@ struct hwrm_func_backing_store_cfg_v2_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le16 type;
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
|
||||
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
|
||||
@ -3772,18 +3785,20 @@ struct hwrm_func_backing_store_qcfg_v2_output {
|
||||
__le16 seq_id;
|
||||
__le16 resp_len;
|
||||
__le16 type;
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
|
||||
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
|
||||
@ -3876,22 +3891,24 @@ struct hwrm_func_backing_store_qcaps_v2_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le16 type;
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
|
||||
@ -3911,22 +3928,24 @@ struct hwrm_func_backing_store_qcaps_v2_output {
|
||||
__le16 seq_id;
|
||||
__le16 resp_len;
|
||||
__le16 type;
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
|
||||
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
|
||||
@ -4202,7 +4221,8 @@ struct hwrm_port_phy_cfg_input {
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
|
||||
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
|
||||
__le16 auto_link_speeds2_mask;
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
|
||||
@ -4217,6 +4237,7 @@ struct hwrm_port_phy_cfg_input {
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
|
||||
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
|
||||
u8 unused_2[6];
|
||||
};
|
||||
|
||||
@ -4292,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output {
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
|
||||
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
|
||||
u8 duplex_cfg;
|
||||
@ -4451,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output {
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
|
||||
#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
|
||||
u8 media_type;
|
||||
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
|
||||
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
|
||||
@ -5049,33 +5077,43 @@ struct hwrm_port_qstats_ext_output {
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
|
||||
/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
|
||||
struct hwrm_port_lpbk_qstats_input {
|
||||
__le16 req_type;
|
||||
__le16 cmpl_ring;
|
||||
__le16 seq_id;
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le16 lpbk_stat_size;
|
||||
u8 flags;
|
||||
#define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
|
||||
u8 unused_0[5];
|
||||
__le64 lpbk_stat_host_addr;
|
||||
};
|
||||
|
||||
/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
|
||||
/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
|
||||
struct hwrm_port_lpbk_qstats_output {
|
||||
__le16 error_code;
|
||||
__le16 req_type;
|
||||
__le16 seq_id;
|
||||
__le16 resp_len;
|
||||
__le16 lpbk_stat_size;
|
||||
u8 unused_0[5];
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
/* port_lpbk_stats (size:640b/80B) */
|
||||
struct port_lpbk_stats {
|
||||
__le64 lpbk_ucast_frames;
|
||||
__le64 lpbk_mcast_frames;
|
||||
__le64 lpbk_bcast_frames;
|
||||
__le64 lpbk_ucast_bytes;
|
||||
__le64 lpbk_mcast_bytes;
|
||||
__le64 lpbk_bcast_bytes;
|
||||
__le64 tx_stat_discard;
|
||||
__le64 tx_stat_error;
|
||||
__le64 rx_stat_discard;
|
||||
__le64 rx_stat_error;
|
||||
u8 unused_0[7];
|
||||
u8 valid;
|
||||
__le64 lpbk_tx_discards;
|
||||
__le64 lpbk_tx_errors;
|
||||
__le64 lpbk_rx_discards;
|
||||
__le64 lpbk_rx_errors;
|
||||
};
|
||||
|
||||
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
|
||||
@ -5140,13 +5178,15 @@ struct hwrm_port_clr_stats_output {
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
|
||||
/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
|
||||
struct hwrm_port_lpbk_clr_stats_input {
|
||||
__le16 req_type;
|
||||
__le16 cmpl_ring;
|
||||
__le16 seq_id;
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le16 port_id;
|
||||
u8 unused_0[6];
|
||||
};
|
||||
|
||||
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
|
||||
@ -5287,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output {
|
||||
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
|
||||
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
|
||||
__le16 flags2;
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
|
||||
#define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
|
||||
u8 internal_port_cnt;
|
||||
u8 unused_0;
|
||||
__le16 supported_speeds2_force_mode;
|
||||
@ -7443,17 +7484,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
__le32 flags;
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
|
||||
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
|
||||
@ -8520,17 +8561,17 @@ struct hwrm_tunnel_dst_port_query_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
u8 tunnel_type;
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
|
||||
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
|
||||
@ -8576,17 +8617,17 @@ struct hwrm_tunnel_dst_port_alloc_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
u8 tunnel_type;
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
|
||||
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
|
||||
@ -8635,17 +8676,17 @@ struct hwrm_tunnel_dst_port_free_input {
|
||||
__le16 target_id;
|
||||
__le64 resp_addr;
|
||||
u8 tunnel_type;
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
|
||||
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
|
||||
@ -9109,6 +9150,7 @@ struct hwrm_struct_hdr {
|
||||
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
|
||||
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
|
||||
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
|
||||
#define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
|
||||
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
|
||||
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
|
||||
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
|
||||
@ -9758,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input {
|
||||
__le16 instance;
|
||||
__le16 unused_0;
|
||||
u8 seg_flags;
|
||||
#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
|
||||
#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
|
||||
#define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
|
||||
u8 unused_1[7];
|
||||
};
|
||||
|
||||
@ -10433,13 +10478,13 @@ struct hwrm_selftest_irq_output {
|
||||
|
||||
/* dbc_dbc (size:64b/8B) */
|
||||
struct dbc_dbc {
|
||||
u32 index;
|
||||
__le32 index;
|
||||
#define DBC_DBC_INDEX_MASK 0xffffffUL
|
||||
#define DBC_DBC_INDEX_SFT 0
|
||||
#define DBC_DBC_EPOCH 0x1000000UL
|
||||
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
|
||||
#define DBC_DBC_TOGGLE_SFT 25
|
||||
u32 type_path_xid;
|
||||
__le32 type_path_xid;
|
||||
#define DBC_DBC_XID_MASK 0xfffffUL
|
||||
#define DBC_DBC_XID_SFT 0
|
||||
#define DBC_DBC_PATH_MASK 0x3000000UL
|
||||
|
@ -1339,6 +1339,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
bool last = false;
|
||||
int try_cnt = 0;
|
||||
int status;
|
||||
|
||||
bh = (struct ice_buf_hdr *)(bufs + start + i);
|
||||
@ -1346,8 +1347,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
|
||||
if (indicate_last)
|
||||
last = ice_is_last_download_buffer(bh, i, count);
|
||||
|
||||
status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
|
||||
&offset, &info, NULL);
|
||||
while (1) {
|
||||
status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
|
||||
last, &offset, &info,
|
||||
NULL);
|
||||
if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
|
||||
hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
|
||||
break;
|
||||
|
||||
try_cnt++;
|
||||
|
||||
if (try_cnt == 5)
|
||||
break;
|
||||
|
||||
msleep(20);
|
||||
}
|
||||
|
||||
if (try_cnt)
|
||||
dev_dbg(ice_hw_to_dev(hw),
|
||||
"ice_aq_download_pkg number of retries: %d\n",
|
||||
try_cnt);
|
||||
|
||||
/* Save AQ status from download package */
|
||||
if (status) {
|
||||
|
@ -805,6 +805,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
@ -5564,7 +5567,7 @@ static int ice_suspend(struct device *dev)
	 */
	disabled = ice_service_task_stop(pf);

	ice_unplug_aux_dev(pf);
	ice_deinit_rdma(pf);

	/* Already suspended? Then there is nothing to do */
	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@ -5644,6 +5647,11 @@ static int ice_resume(struct device *dev)
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	ret = ice_init_rdma(pf);
	if (ret)
		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
			ret);

	clear_bit(ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;

@ -1899,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_DFLT) {
	    lkup_type == ICE_SW_LKUP_DFLT ||
	    lkup_type == ICE_SW_LKUP_LAST) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		if (opc == ice_aqc_opc_alloc_res)
@ -2922,7 +2923,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_DFLT)
	    lkup_type == ICE_SW_LKUP_DFLT ||
	    lkup_type == ICE_SW_LKUP_LAST)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
				     ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)

@ -4014,7 +4014,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
			}
		}

		skb = build_skb(data, frag_size);
		if (frag_size)
			skb = build_skb(data, frag_size);
		else
			skb = slab_build_skb(data);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;

@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
               otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
               otx2_devlink.o qos_sq.o qos.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicvf-y := otx2_vf.o

rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o

ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af

@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_config);

static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_alloc);

static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@ -260,6 +262,7 @@ update_sq_smq_map:

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_update);

int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)

	return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_stop);

int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@ -321,6 +325,7 @@ unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);

void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
			       bool pfc_enable)
@ -385,6 +390,7 @@ out:
			"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
			qidx, err);
}
EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);

static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)

	return 0;
}
EXPORT_SYMBOL(otx2_dcbnl_set_ops);

@ -113,6 +113,7 @@ err_dl:
	devlink_free(dl);
	return err;
}
EXPORT_SYMBOL(otx2_register_dl);

void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@ -124,3 +125,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
			  ARRAY_SIZE(otx2_dl_params));
	devlink_free(dl);
}
EXPORT_SYMBOL(otx2_unregister_dl);

@ -1174,8 +1174,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		/* Insert vlan tag before giving pkt to tso */
		if (skb_vlan_tag_present(skb))
		if (skb_vlan_tag_present(skb)) {
			skb = __vlan_hwaccel_push_inside(skb);
			if (!skb)
				return true;
		}
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

@ -1127,8 +1127,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
	if (netdev->phydev)
		phy_ethtool_get_wol(netdev->phydev, wol);

	wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
			  WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
	if (wol->supported != adapter->phy_wol_supported)
		netif_warn(adapter, drv, adapter->netdev,
			   "PHY changed its supported WOL! old=%x, new=%x\n",
			   adapter->phy_wol_supported, wol->supported);

	wol->supported |= MAC_SUPPORTED_WAKES;

	if (adapter->is_pci11x1x)
		wol->supported |= WAKE_MAGICSECURE;
@ -1143,7 +1147,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* WAKE_MAGICSECURE is a modifier of and only valid together with
	 * WAKE_MAGIC
	 */
	if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
		return -EINVAL;

	if (netdev->phydev) {
		struct ethtool_wolinfo phy_wol;
		int ret;

		phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;

		/* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
		 * for PHYs that do not support WAKE_MAGICSECURE
		 */
		if (wol->wolopts & WAKE_MAGICSECURE &&
		    !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
			phy_wol.wolopts &= ~WAKE_MAGIC;

		ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
		if (ret && (ret != -EOPNOTSUPP))
			return ret;

		if (ret == -EOPNOTSUPP)
			adapter->phy_wolopts = 0;
		else
			adapter->phy_wolopts = phy_wol.wolopts;
	} else {
		adapter->phy_wolopts = 0;
	}

	adapter->wolopts = 0;
	wol->wolopts &= ~adapter->phy_wolopts;
	if (wol->wolopts & WAKE_UCAST)
		adapter->wolopts |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
@ -1164,10 +1200,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
		memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
	}

	wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
	device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);

	return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
			      : -ENETDOWN;
	return 0;
}
#endif /* CONFIG_PM */
