Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/broadcom/bnxt/bnxt.c
  1e7962114c ("bnxt_en: Restore PTP tx_avail count in case of skb_pad() error")
  165f87691a ("bnxt_en: add timestamping statistics support")

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit a6ec08beec
@@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2
.mailmap
@@ -608,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
@@ -2192,12 +2192,6 @@
			Format: 0 | 1
			Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.

	init_mlocked_on_free=	[MM] Fill freed userspace memory with zeroes if
			it was mlock'ed and not explicitly munlock'ed
			afterwards.
			Format: 0 | 1
			Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON

	init_pkru=	[X86] Specify the default memory protection keys rights
			register contents for all processes. 0x55555554 by
			default (disallow access to all but pkey 0). Can
@@ -139,7 +139,7 @@ allOf:
Voltage output range of the channel as <minimum, maximum>
Required connections:
Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V;
oneOf:
- items:
- const: 0
@@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true

required:
- peer-hub
@@ -32,6 +32,7 @@ Security-related interfaces
   seccomp_filter
   landlock
   lsm
   mfd_noexec
   spec_ctrl
   tee

Documentation/userspace-api/mfd_noexec.rst (new file, 86 lines)
@@ -0,0 +1,86 @@
.. SPDX-License-Identifier: GPL-2.0

==================================
Introduction of non-executable mfd
==================================
:Author:
    Daniel Verkamp <dverkamp@chromium.org>
    Jeff Xu <jeffxu@chromium.org>

:Contributor:
    Aleksa Sarai <cyphar@cyphar.com>

Since Linux introduced the memfd feature, memfds have always had their
execute bit set, and the memfd_create() syscall doesn't allow setting
it differently.

However, in a secure-by-default system such as ChromeOS (where all
executables should come from the rootfs, which is protected by verified
boot), this executable nature of memfd opens a door for a NoExec bypass
and enables a "confused deputy attack". For example, in VRP bug [1] the
cros_vm process created a memfd to share content with an external
process; however, the memfd was overwritten and used for executing
arbitrary code and root escalation. [2] lists more VRPs of this kind.

On the other hand, executable memfds have their legitimate uses: runc uses
a memfd's seal and executable features to copy the contents of a binary and
then execute it. For such a system, we need a way to differentiate runc's
use of executable memfds from an attacker's [3].

To address the above:
- Let memfd_create() set the X bit at creation time.
- Let the memfd be sealed against modifying the X bit when NX is set.
- Add a new pid namespace sysctl, vm.memfd_noexec, to help applications
  migrate to and enforce non-executable MFD.

User API
========
``int memfd_create(const char *name, unsigned int flags)``

``MFD_NOEXEC_SEAL``
	When the MFD_NOEXEC_SEAL bit is set in ``flags``, the memfd is
	created with NX. F_SEAL_EXEC is set and the memfd can't be modified
	to add X later. MFD_ALLOW_SEALING is also implied.
	This is the most common way for an application to use memfd.

``MFD_EXEC``
	When the MFD_EXEC bit is set in ``flags``, the memfd is created with X.

Note:
	``MFD_NOEXEC_SEAL`` implies ``MFD_ALLOW_SEALING``. If an app does not
	want sealing, it can add F_SEAL_SEAL after creation.
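The following user-space sketch exercises the flag described above. It
assumes Linux 6.3+ UAPI headers for ``MFD_NOEXEC_SEAL``; the fchmod() call is
only there to show that the execute bits can no longer be turned on::

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Created with the execute bits cleared and F_SEAL_EXEC applied. */
        int fd = memfd_create("demo", MFD_NOEXEC_SEAL);

        if (fd < 0) {
            perror("memfd_create");
            return 1;
        }

        /* Trying to add an execute bit afterwards should now fail. */
        if (fchmod(fd, 0755) < 0)
            perror("fchmod (expected to fail)");

        close(fd);
        return 0;
    }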

Sysctl:
========
``pid namespaced sysctl vm.memfd_noexec``

The new pid namespaced sysctl vm.memfd_noexec has 3 values:

- 0: MEMFD_NOEXEC_SCOPE_EXEC
	memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
	MFD_EXEC was set.

- 1: MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL
	memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL acts like
	MFD_NOEXEC_SEAL was set.

- 2: MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED
	memfd_create() without MFD_NOEXEC_SEAL will be rejected.

The sysctl allows finer control of memfd_create for old software that
doesn't set the executable bit; for example, a container with
vm.memfd_noexec=1 means the old software will create non-executable memfds
by default while new software can create executable memfds by setting
MFD_EXEC.

The value of vm.memfd_noexec is passed to the child namespace at creation
time. In addition, the setting is hierarchical, i.e. during memfd_create()
we search from the current ns up to the root ns and use the most restrictive
setting.
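For instance, a container manager that wants the namespace-wide default
described above could pin it before launching legacy workloads; a minimal
sketch (the proc path follows the vm.memfd_noexec name, error handling
trimmed)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* vm.memfd_noexec is per pid namespace; this sets the current one. */
        int fd = open("/proc/sys/vm/memfd_noexec", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* 1 == MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL: legacy callers get sealed, NX memfds. */
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }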

[1] https://crbug.com/1305267

[2] https://bugs.chromium.org/p/chromium/issues/list?q=type%3Dbug-security%20memfd%20escalation&can=1

[3] https://lwn.net/Articles/781013/
@@ -62,12 +62,21 @@ shared page with scale and offset values into user space. User
space code performs the same algorithm of reading the TSC and
applying the scale and offset to get the constant 10 MHz clock.
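In rough outline, that computation looks like the sketch below. The struct
layout is abridged from the TLFS reference TSC page, the sequence-based retry
loop that guards against concurrent updates is omitted, and the 64.64
fixed-point multiply relies on the compiler's 128-bit integer extension::

    #include <stdint.h>

    /* Abridged Hyper-V reference TSC page. */
    struct tsc_ref_page {
        volatile uint32_t sequence;
        uint32_t reserved;
        volatile uint64_t scale;   /* 64.64 fixed-point multiplier */
        volatile int64_t offset;   /* in 100 ns units */
    };

    /* Convert a raw TSC reading into the constant 10 MHz (100 ns) clock. */
    static uint64_t read_ref_time(const struct tsc_ref_page *tp, uint64_t tsc)
    {
        /* reference time = ((tsc * scale) >> 64) + offset */
        return (uint64_t)(((unsigned __int128)tsc * tp->scale) >> 64) + tp->offset;
    }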

Linux clockevents are based on Hyper-V synthetic timer 0. While
Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. Interrupts from stimer0 are recorded on the "HVS" line in
/proc/interrupts. Clockevents based on the virtualized PIT and
local APIC timer also work, but the Hyper-V synthetic timer is
preferred.
Linux clockevents are based on Hyper-V synthetic timer 0 (stimer0).
While Hyper-V offers 4 synthetic timers for each CPU, Linux only uses
timer 0. In older versions of Hyper-V, an interrupt from stimer0
results in a VMBus control message that is demultiplexed by
vmbus_isr() as described in the Documentation/virt/hyperv/vmbus.rst
documentation. In newer versions of Hyper-V, stimer0 interrupts can
be mapped to an architectural interrupt, which is referred to as
"Direct Mode". Linux prefers to use Direct Mode when available. Since
x86/x64 doesn't support per-CPU interrupts, Direct Mode statically
allocates an x86 interrupt vector (HYPERV_STIMER0_VECTOR) across all CPUs
and explicitly codes it to call the stimer0 interrupt handler. Hence
interrupts from stimer0 are recorded on the "HVS" line in /proc/interrupts
rather than being associated with a Linux IRQ. Clockevents based on the
virtualized PIT and local APIC timer also work, but Hyper-V stimer0
is preferred.

The driver for the Hyper-V synthetic system clock and timers is
drivers/clocksource/hyperv_timer.c.
@@ -40,7 +40,7 @@ Linux guests communicate with Hyper-V in four different ways:
  arm64, these synthetic registers must be accessed using explicit
  hypercalls.

* VMbus: VMbus is a higher-level software construct that is built on
* VMBus: VMBus is a higher-level software construct that is built on
  the other 3 mechanisms. It is a message passing interface between
  the Hyper-V host and the Linux guest. It uses memory that is shared
  between Hyper-V and the guest, along with various signaling
@@ -54,8 +54,8 @@ x86/x64 architecture only.

.. _Hyper-V Top Level Functional Spec (TLFS): https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/tlfs/tlfs

VMbus is not documented. This documentation provides a high-level
overview of VMbus and how it works, but the details can be discerned
VMBus is not documented. This documentation provides a high-level
overview of VMBus and how it works, but the details can be discerned
only from the code.

Sharing Memory
@@ -74,7 +74,7 @@ follows:
  physical address space. How Hyper-V is told about the GPA or list
  of GPAs varies. In some cases, a single GPA is written to a
  synthetic register. In other cases, a GPA or list of GPAs is sent
  in a VMbus message.
  in a VMBus message.

* Hyper-V translates the GPAs into "real" physical memory addresses,
  and creates a virtual mapping that it can use to access the memory.
@@ -133,9 +133,9 @@ only the CPUs actually present in the VM, so Linux does not report
any hot-add CPUs.

A Linux guest CPU may be taken offline using the normal Linux
mechanisms, provided no VMbus channel interrupts are assigned to
the CPU. See the section on VMbus Interrupts for more details
on how VMbus channel interrupts can be re-assigned to permit
mechanisms, provided no VMBus channel interrupts are assigned to
the CPU. See the section on VMBus Interrupts for more details
on how VMBus channel interrupts can be re-assigned to permit
taking a CPU offline.

32-bit and 64-bit
@@ -169,14 +169,14 @@ and functionality. Hyper-V indicates feature/function availability
via flags in synthetic MSRs that Hyper-V provides to the guest,
and the guest code tests these flags.

VMbus has its own protocol version that is negotiated during the
initial VMbus connection from the guest to Hyper-V. This version
VMBus has its own protocol version that is negotiated during the
initial VMBus connection from the guest to Hyper-V. This version
number is also output to dmesg during boot. This version number
is checked in a few places in the code to determine if specific
functionality is present.

Furthermore, each synthetic device on VMbus also has a protocol
version that is separate from the VMbus protocol version. Device
Furthermore, each synthetic device on VMBus also has a protocol
version that is separate from the VMBus protocol version. Device
drivers for these synthetic devices typically negotiate the device
protocol version, and may test that protocol version to determine
if specific device functionality is present.
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0

VMbus
VMBus
=====
VMbus is a software construct provided by Hyper-V to guest VMs. It
VMBus is a software construct provided by Hyper-V to guest VMs. It
consists of a control path and common facilities used by synthetic
devices that Hyper-V presents to guest VMs. The control path is
used to offer synthetic devices to the guest VM and, in some cases,
@@ -12,9 +12,9 @@ and the synthetic device implementation that is part of Hyper-V, and
signaling primitives to allow Hyper-V and the guest to interrupt
each other.

VMbus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMbus driver (drivers/hv/vmbus_drv.c)
establishes the VMbus control path with the Hyper-V host, then
VMBus is modeled in Linux as a bus, with the expected /sys/bus/vmbus
entry in a running Linux guest. The VMBus driver (drivers/hv/vmbus_drv.c)
establishes the VMBus control path with the Hyper-V host, then
registers itself as a Linux bus driver. It implements the standard
bus functions for adding and removing devices to/from the bus.

@@ -49,9 +49,9 @@ synthetic NIC is referred to as "netvsc" and the Linux driver for
the synthetic SCSI controller is "storvsc". These drivers contain
functions with names like "storvsc_connect_to_vsp".

VMbus channels
VMBus channels
--------------
An instance of a synthetic device uses VMbus channels to communicate
An instance of a synthetic device uses VMBus channels to communicate
between the VSP and the VSC. Channels are bi-directional and used
for passing messages. Most synthetic devices use a single channel,
but the synthetic SCSI controller and synthetic NIC may use multiple
@@ -73,7 +73,7 @@ write indices and some control flags, followed by the memory for the
actual ring. The size of the ring is determined by the VSC in the
guest and is specific to each synthetic device. The list of GPAs
making up the ring is communicated to the Hyper-V host over the
VMbus control path as a GPA Descriptor List (GPADL). See function
VMBus control path as a GPA Descriptor List (GPADL). See function
vmbus_establish_gpadl().

Each ring buffer is mapped into contiguous Linux kernel virtual
@@ -102,10 +102,10 @@ resources. For Windows Server 2019 and later, this limit is
approximately 1280 Mbytes. For versions prior to Windows Server
2019, the limit is approximately 384 Mbytes.

VMbus messages
--------------
All VMbus messages have a standard header that includes the message
length, the offset of the message payload, some flags, and a
VMBus channel messages
----------------------
All messages sent in a VMBus channel have a standard header that includes
the message length, the offset of the message payload, some flags, and a
transactionID. The portion of the message after the header is
unique to each VSP/VSC pair.
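A sketch of that common header as a C struct (the field names here are
illustrative only; the kernel's own definition is in include/linux/hyperv.h)::

    #include <linux/types.h>

    /* Header carried by every packet placed in a VMBus channel ring buffer. */
    struct vmbus_packet_header_sketch {
        __u16 type;      /* inband data, GPA-direct, completion, ... */
        __u16 offset8;   /* payload offset from start of header, in 8-byte units */
        __u16 len8;      /* total packet length, in 8-byte units */
        __u16 flags;     /* e.g. "completion requested" */
        __u64 trans_id;  /* transaction ID echoed back in the completion */
    } __attribute__((packed));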

@@ -137,7 +137,7 @@ control message contains a list of GPAs that describe the data
buffer. For example, the storvsc driver uses this approach to
specify the data buffers to/from which disk I/O is done.

Three functions exist to send VMbus messages:
Three functions exist to send VMBus channel messages:

1. vmbus_sendpacket(): Control-only messages and messages with
   embedded data -- no GPAs
@@ -154,20 +154,51 @@ Historically, Linux guests have trusted Hyper-V to send well-formed
and valid messages, and Linux drivers for synthetic devices did not
fully validate messages. With the introduction of processor
technologies that fully encrypt guest memory and that allow the
guest to not trust the hypervisor (AMD SNP-SEV, Intel TDX), trusting
guest to not trust the hypervisor (AMD SEV-SNP, Intel TDX), trusting
the Hyper-V host is no longer a valid assumption. The drivers for
VMbus synthetic devices are being updated to fully validate any
VMBus synthetic devices are being updated to fully validate any
values read from memory that is shared with Hyper-V, which includes
messages from VMbus devices. To facilitate such validation,
messages from VMBus devices. To facilitate such validation,
messages read by the guest from the "in" ring buffer are copied to a
temporary buffer that is not shared with Hyper-V. Validation is
performed in this temporary buffer without the risk of Hyper-V
maliciously modifying the message after it is validated but before
it is used.

VMbus interrupts
Synthetic Interrupt Controller (synic)
--------------------------------------
Hyper-V provides each guest CPU with a synthetic interrupt controller
that is used by VMBus for host-guest communication. While each synic
defines 16 synthetic interrupts (SINT), Linux uses only one of the 16
(VMBUS_MESSAGE_SINT). All interrupts related to communication between
the Hyper-V host and a guest CPU use that SINT.

The SINT is mapped to a single per-CPU architectural interrupt (i.e.,
an 8-bit x86/x64 interrupt vector, or an arm64 PPI INTID). Because
each CPU in the guest has a synic and may receive VMBus interrupts,
they are best modeled in Linux as per-CPU interrupts. This model works
well on arm64 where a single per-CPU Linux IRQ is allocated for
VMBUS_MESSAGE_SINT. This IRQ appears in /proc/interrupts as an IRQ labelled
"Hyper-V VMbus". Since x86/x64 lacks support for per-CPU IRQs, an x86
interrupt vector is statically allocated (HYPERVISOR_CALLBACK_VECTOR)
across all CPUs and explicitly coded to call vmbus_isr(). In this case,
there's no Linux IRQ, and the interrupts are visible in aggregate in
/proc/interrupts on the "HYP" line.

The synic provides the means to demultiplex the architectural interrupt into
one or more logical interrupts and route the logical interrupt to the proper
VMBus handler in Linux. This demultiplexing is done by vmbus_isr() and
related functions that access synic data structures.

The synic is not modeled in Linux as an irq chip or irq domain,
and the demultiplexed logical interrupts are not Linux IRQs. As such,
they don't appear in /proc/interrupts or /proc/irq. The CPU
affinity for one of these logical interrupts is controlled via an
entry under /sys/bus/vmbus as described below.

VMBus interrupts
----------------
VMbus provides a mechanism for the guest to interrupt the host when
VMBus provides a mechanism for the guest to interrupt the host when
the guest has queued new messages in a ring buffer. The host
expects that the guest will send an interrupt only when an "out"
ring buffer transitions from empty to non-empty. If the guest sends
@@ -176,63 +207,55 @@ unnecessary. If a guest sends an excessive number of unnecessary
interrupts, the host may throttle that guest by suspending its
execution for a few seconds to prevent a denial-of-service attack.

Similarly, the host will interrupt the guest when it sends a new
message on the VMbus control path, or when a VMbus channel "in" ring
buffer transitions from empty to non-empty. Each CPU in the guest
may receive VMbus interrupts, so they are best modeled as per-CPU
interrupts in Linux. This model works well on arm64 where a single
per-CPU IRQ is allocated for VMbus. Since x86/x64 lacks support for
per-CPU IRQs, an x86 interrupt vector is statically allocated (see
HYPERVISOR_CALLBACK_VECTOR) across all CPUs and explicitly coded to
call the VMbus interrupt service routine. These interrupts are
visible in /proc/interrupts on the "HYP" line.
Similarly, the host will interrupt the guest via the synic when
it sends a new message on the VMBus control path, or when a VMBus
channel "in" ring buffer transitions from empty to non-empty due to
the host inserting a new VMBus channel message. The control message stream
and each VMBus channel "in" ring buffer are separate logical interrupts
that are demultiplexed by vmbus_isr(). It demultiplexes by first checking
for channel interrupts by calling vmbus_chan_sched(), which looks at a synic
bitmap to determine which channels have pending interrupts on this CPU.
If multiple channels have pending interrupts for this CPU, they are
processed sequentially. When all channel interrupts have been processed,
vmbus_isr() checks for and processes any messages received on the VMBus
control path.

The guest CPU that a VMbus channel will interrupt is selected by the
The guest CPU that a VMBus channel will interrupt is selected by the
guest when the channel is created, and the host is informed of that
selection. VMbus devices are broadly grouped into two categories:
selection. VMBus devices are broadly grouped into two categories:

1. "Slow" devices that need only one VMbus channel. The devices
1. "Slow" devices that need only one VMBus channel. The devices
   (such as keyboard, mouse, heartbeat, and timesync) generate
   relatively few interrupts. Their VMbus channels are all
   relatively few interrupts. Their VMBus channels are all
   assigned to interrupt the VMBUS_CONNECT_CPU, which is always
   CPU 0.

2. "High speed" devices that may use multiple VMbus channels for
2. "High speed" devices that may use multiple VMBus channels for
   higher parallelism and performance. These devices include the
   synthetic SCSI controller and synthetic NIC. Their VMbus
   synthetic SCSI controller and synthetic NIC. Their VMBus
   channel interrupts are assigned to CPUs that are spread out
   among the available CPUs in the VM so that interrupts on
   multiple channels can be processed in parallel.

The assignment of VMbus channel interrupts to CPUs is done in the
The assignment of VMBus channel interrupts to CPUs is done in the
function init_vp_index(). This assignment is done outside of the
normal Linux interrupt affinity mechanism, so the interrupts are
neither "unmanaged" nor "managed" interrupts.

The CPU that a VMbus channel will interrupt can be seen in
The CPU that a VMBus channel will interrupt can be seen in
/sys/bus/vmbus/devices/<deviceGUID>/ channels/<channelRelID>/cpu.
When running on later versions of Hyper-V, the CPU can be changed
by writing a new value to this sysfs entry. Because the interrupt
assignment is done outside of the normal Linux affinity mechanism,
there are no entries in /proc/irq corresponding to individual
VMbus channel interrupts.
by writing a new value to this sysfs entry. Because VMBus channel
interrupts are not Linux IRQs, there are no entries in /proc/interrupts
or /proc/irq corresponding to individual VMBus channel interrupts.
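For example, moving one channel's interrupt to CPU 2 is a single sysfs
write; a sketch (the device GUID and channel relid in the path are
placeholders, so adjust them to what the target guest actually exposes)::

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/bus/vmbus/devices/"
            "f8615163-df3e-46c5-913f-f2d2f965ed0e/channels/15/cpu";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* Writing a CPU number re-targets the channel's interrupt. */
        fprintf(f, "2\n");
        return fclose(f) ? 1 : 0;
    }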

An online CPU in a Linux guest may not be taken offline if it has
VMbus channel interrupts assigned to it. Any such channel
VMBus channel interrupts assigned to it. Any such channel
interrupts must first be manually reassigned to another CPU as
described above. When no channel interrupts are assigned to the
CPU, it can be taken offline.

When a guest CPU receives a VMbus interrupt from the host, the
function vmbus_isr() handles the interrupt. It first checks for
channel interrupts by calling vmbus_chan_sched(), which looks at a
bitmap setup by the host to determine which channels have pending
interrupts on this CPU. If multiple channels have pending
interrupts for this CPU, they are processed sequentially. When all
channel interrupts have been processed, vmbus_isr() checks for and
processes any message received on the VMbus control path.

The VMbus channel interrupt handling code is designed to work
The VMBus channel interrupt handling code is designed to work
correctly even if an interrupt is received on a CPU other than the
CPU assigned to the channel. Specifically, the code does not use
CPU-based exclusion for correctness. In normal operation, Hyper-V
@@ -242,23 +265,23 @@ when Hyper-V will make the transition. The code must work correctly
even if there is a time lag before Hyper-V starts interrupting the
new CPU. See comments in target_cpu_store().

VMbus device creation/deletion
VMBus device creation/deletion
------------------------------
Hyper-V and the Linux guest have a separate message-passing path
that is used for synthetic device creation and deletion. This
path does not use a VMbus channel. See vmbus_post_msg() and
path does not use a VMBus channel. See vmbus_post_msg() and
vmbus_on_msg_dpc().

The first step is for the guest to connect to the generic
Hyper-V VMbus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMbus protocol version they will
Hyper-V VMBus mechanism. As part of establishing this connection,
the guest and Hyper-V agree on a VMBus protocol version they will
use. This negotiation allows newer Linux kernels to run on older
Hyper-V versions, and vice versa.

The guest then tells Hyper-V to "send offers". Hyper-V sends an
offer message to the guest for each synthetic device that the VM
is configured to have. Each VMbus device type has a fixed GUID
known as the "class ID", and each VMbus device instance is also
is configured to have. Each VMBus device type has a fixed GUID
known as the "class ID", and each VMBus device instance is also
identified by a GUID. The offer message from Hyper-V contains
both GUIDs to uniquely (within the VM) identify the device.
There is one offer message for each device instance, so a VM with
@@ -275,7 +298,7 @@ type based on the class ID, and invokes the correct driver to set up
the device. Driver/device matching is performed using the standard
Linux mechanism.

The device driver probe function opens the primary VMbus channel to
The device driver probe function opens the primary VMBus channel to
the corresponding VSP. It allocates guest memory for the channel
ring buffers and shares the ring buffer with the Hyper-V host by
giving the host a list of GPAs for the ring buffer memory. See
@@ -285,7 +308,7 @@ Once the ring buffer is set up, the device driver and VSP exchange
setup messages via the primary channel. These messages may include
negotiating the device protocol version to be used between the Linux
VSC and the VSP on the Hyper-V host. The setup messages may also
include creating additional VMbus channels, which are somewhat
include creating additional VMBus channels, which are somewhat
mis-named as "sub-channels" since they are functionally
equivalent to the primary channel once they are created.
MAINTAINERS
@@ -3980,7 +3980,7 @@ R: Song Liu <song@kernel.org>
R:	Yonghong Song <yonghong.song@linux.dev>
R:	John Fastabend <john.fastabend@gmail.com>
R:	KP Singh <kpsingh@kernel.org>
R:	Stanislav Fomichev <sdf@google.com>
R:	Stanislav Fomichev <sdf@fomichev.me>
R:	Hao Luo <haoluo@google.com>
R:	Jiri Olsa <jolsa@kernel.org>
L:	bpf@vger.kernel.org
@@ -5295,7 +5295,7 @@ F: drivers/infiniband/hw/usnic/

CLANG CONTROL FLOW INTEGRITY SUPPORT
M:	Sami Tolvanen <samitolvanen@google.com>
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Nathan Chancellor <nathan@kernel.org>
L:	llvm@lists.linux.dev
S:	Supported
@@ -8211,7 +8211,7 @@ F: rust/kernel/net/phy.rs

EXEC & BINFMT API, ELF
R:	Eric Biederman <ebiederm@xmission.com>
R:	Kees Cook <keescook@chromium.org>
R:	Kees Cook <kees@kernel.org>
L:	linux-mm@kvack.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
@@ -8612,7 +8612,7 @@ S: Maintained
F:	drivers/net/ethernet/nvidia/*

FORTIFY_SOURCE
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
L:	linux-hardening@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -9102,7 +9102,7 @@ F: include/linux/mfd/gsc.h
F:	include/linux/platform_data/gsc_hwmon.h

GCC PLUGINS
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
L:	linux-hardening@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -9236,7 +9236,7 @@ S: Maintained
F:	drivers/input/touchscreen/resistive-adc-touch.c

GENERIC STRING LIBRARY
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Andy Shevchenko <andy@kernel.org>
L:	linux-hardening@vger.kernel.org
S:	Supported
@@ -11034,8 +11034,8 @@ F: include/uapi/drm/i915_drm.h

INTEL DRM XE DRIVER (Lunar Lake and newer)
M:	Lucas De Marchi <lucas.demarchi@intel.com>
M:	Oded Gabbay <ogabbay@kernel.org>
M:	Thomas Hellström <thomas.hellstrom@linux.intel.com>
M:	Rodrigo Vivi <rodrigo.vivi@intel.com>
L:	intel-xe@lists.freedesktop.org
S:	Supported
W:	https://drm.pages.freedesktop.org/intel-docs/
@@ -11950,7 +11950,7 @@ F: scripts/package/
F:	usr/

KERNEL HARDENING (not covered by other areas)
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Gustavo A. R. Silva <gustavoars@kernel.org>
L:	linux-hardening@vger.kernel.org
S:	Supported
@@ -12479,7 +12479,7 @@ F: drivers/scsi/53c700*

LEAKING_ADDRESSES
M:	Tycho Andersen <tycho@tycho.pizza>
R:	Kees Cook <keescook@chromium.org>
R:	Kees Cook <kees@kernel.org>
L:	linux-hardening@vger.kernel.org
S:	Maintained
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
@@ -12775,7 +12775,7 @@ F: arch/powerpc/platforms/8xx/
F:	arch/powerpc/platforms/83xx/

LINUX KERNEL DUMP TEST MODULE (LKDTM)
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
S:	Maintained
F:	drivers/misc/lkdtm/*
F:	tools/testing/selftests/lkdtm/*
@@ -12905,7 +12905,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F:	drivers/media/usb/dvb-usb-v2/lmedm04*

LOADPIN SECURITY MODULE
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F:	Documentation/admin-guide/LSM/LoadPin.rst
@@ -17997,7 +17997,7 @@ F: tools/testing/selftests/proc/

PROC SYSCTL
M:	Luis Chamberlain <mcgrof@kernel.org>
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
M:	Joel Granados <j.granados@samsung.com>
L:	linux-kernel@vger.kernel.org
L:	linux-fsdevel@vger.kernel.org
@@ -18053,7 +18053,7 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F:	drivers/net/pse-pd/

PSTORE FILESYSTEM
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Tony Luck <tony.luck@intel.com>
R:	Guilherme G. Piccoli <gpiccoli@igalia.com>
L:	linux-hardening@vger.kernel.org
@@ -20059,7 +20059,7 @@ F: drivers/media/cec/platform/seco/seco-cec.c
F:	drivers/media/cec/platform/seco/seco-cec.h

SECURE COMPUTING
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Andy Lutomirski <luto@amacapital.net>
R:	Will Drewry <wad@chromium.org>
S:	Supported
@@ -22973,7 +22973,7 @@ F: drivers/block/ublk_drv.c
F:	include/uapi/linux/ublk_cmd.h

UBSAN
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
R:	Marco Elver <elver@google.com>
R:	Andrey Konovalov <andreyknvl@gmail.com>
R:	Andrey Ryabinin <ryabinin.a.a@gmail.com>
@@ -23975,7 +23975,6 @@ VMALLOC
M:	Andrew Morton <akpm@linux-foundation.org>
R:	Uladzislau Rezki <urezki@gmail.com>
R:	Christoph Hellwig <hch@infradead.org>
R:	Lorenzo Stoakes <lstoakes@gmail.com>
L:	linux-mm@kvack.org
S:	Maintained
W:	http://www.linux-mm.org
@@ -24811,7 +24810,7 @@ F: drivers/net/hamradio/yam*
F:	include/linux/yam.h

YAMA SECURITY MODULE
M:	Kees Cook <keescook@chromium.org>
M:	Kees Cook <kees@kernel.org>
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F:	Documentation/admin-guide/LSM/Yama.rst
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -14,6 +14,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_EFI
void efi_init(void);
@@ -25,6 +26,18 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, boo
#define arch_efi_call_virt_setup()	efi_virtmap_load()
#define arch_efi_call_virt_teardown()	efi_virtmap_unload()

#ifdef CONFIG_CPU_TTBR0_PAN
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({				\
	unsigned int flags = uaccess_save_and_enable();			\
	efi_status_t res = _Generic((p)->f(args),			\
			efi_status_t:	(p)->f(args),			\
			default:	((p)->f(args), EFI_ABORTED));	\
	uaccess_restore(flags);						\
	res;								\
})
#endif

#define ARCH_EFI_IRQ_FLAGS_MASK \
	(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
	 PSR_T_BIT | MODE_MASK)
@@ -9,6 +9,7 @@
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>

@@ -213,6 +214,7 @@ if (!p) {
		return -ENOMEM;
	}

	kmemleak_not_leak(p);
	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
	 * RAC flush causes kernel panics on BCM6358 when booting from TP1
	 * because the bootloader is not initializing it properly.
	 */
	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
				  !!BMIPS_GET_CBR();
}

static void bcm6368_quirks(void)
@@ -112,8 +112,8 @@ retry:
	 * gives them time to settle
	 */
	if (where == PCI_VENDOR_ID) {
		if (ret == 0xffffffff || ret == 0x00000000 ||
		    ret == 0x0000ffff || ret == 0xffff0000) {
		if (*val == 0xffffffff || *val == 0x00000000 ||
		    *val == 0x0000ffff || *val == 0xffff0000) {
			if (delay > 4)
				return 0;
			delay *= 2;
@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

void flush_kernel_dcache_page_addr(const void *addr);

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));

/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end)		flush_cache_all()
void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()
void flush_cache_vunmap(unsigned long start, unsigned long end);

void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(const void *addr)
{
	flush_kernel_dcache_page_addr(addr);
}
void kunmap_flush_on_unmap(const void *addr);

#endif /* _PARISC_CACHEFLUSH_H */
@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
	return pte;
}

static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#define ptep_get ptep_get

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	pte = ptep_get(ptep);
	if (!pte_young(pte)) {
		return 0;
	}
@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
	return 1;
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte(ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));
@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
@ -20,6 +20,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/pdc.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/cacheflush.h>
|
||||
@ -31,20 +32,31 @@
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cachectl.h>
|
||||
|
||||
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
|
||||
|
||||
/*
|
||||
* When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
|
||||
* of page flushes done flush_cache_page_if_present. There are some
|
||||
* pros and cons in using this option. It may increase the risk of
|
||||
* random segmentation faults.
|
||||
*/
|
||||
#define CONFIG_FLUSH_PAGE_ACCESSED 0
|
||||
|
||||
int split_tlb __ro_after_init;
|
||||
int dcache_stride __ro_after_init;
|
||||
int icache_stride __ro_after_init;
|
||||
EXPORT_SYMBOL(dcache_stride);
|
||||
|
||||
/* Internal implementation in arch/parisc/kernel/pacache.S */
|
||||
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
|
||||
EXPORT_SYMBOL(flush_dcache_page_asm);
|
||||
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
|
||||
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
|
||||
|
||||
/* Internal implementation in arch/parisc/kernel/pacache.S */
|
||||
void flush_data_cache_local(void *); /* flushes local data-cache only */
|
||||
void flush_instruction_cache_local(void); /* flushes local code-cache only */
|
||||
|
||||
static void flush_kernel_dcache_page_addr(const void *addr);
|
||||
|
||||
/* On some machines (i.e., ones with the Merced bus), there can be
|
||||
* only a single PxTLB broadcast at a time; this must be guaranteed
|
||||
* by software. We need a spinlock around all TLB flushes to ensure
|
||||
@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
|
||||
{
|
||||
if (!static_branch_likely(&parisc_has_cache))
|
||||
return;
|
||||
|
||||
/*
|
||||
* The TLB is the engine of coherence on parisc. The CPU is
|
||||
* entitled to speculate any page with a TLB mapping, so here
|
||||
* we kill the mapping then flush the page along a special flush
|
||||
* only alias mapping. This guarantees that the page is no-longer
|
||||
* in the cache for any process and nor may it be speculatively
|
||||
* read in (until the user or kernel specifically accesses it,
|
||||
* of course).
|
||||
*/
|
||||
flush_tlb_page(vma, vmaddr);
|
||||
|
||||
preempt_disable();
|
||||
flush_dcache_page_asm(physaddr, vmaddr);
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
|
||||
static void flush_kernel_dcache_page_addr(const void *addr)
|
||||
{
|
||||
unsigned long flags, space, pgd, prot;
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
unsigned long pgd_lock;
|
||||
#endif
|
||||
unsigned long vaddr = (unsigned long)addr;
|
||||
unsigned long flags;
|
||||
|
||||
vmaddr &= PAGE_MASK;
|
||||
/* Purge TLB entry to remove translation on all CPUs */
|
||||
purge_tlb_start(flags);
|
||||
pdtlb(SR_KERNEL, addr);
|
||||
purge_tlb_end(flags);
|
||||
|
||||
/* Use tmpalias flush to prevent data cache move-in */
|
||||
preempt_disable();
|
||||
|
||||
/* Set context for flush */
|
||||
local_irq_save(flags);
|
||||
prot = mfctl(8);
|
||||
space = mfsp(SR_USER);
|
||||
pgd = mfctl(25);
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
pgd_lock = mfctl(28);
|
||||
#endif
|
||||
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
|
||||
local_irq_restore(flags);
|
||||
|
||||
flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
|
||||
flush_tlb_page(vma, vmaddr);
|
||||
|
||||
/* Restore previous context */
|
||||
local_irq_save(flags);
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
mtctl(pgd_lock, 28);
|
||||
#endif
|
||||
mtctl(pgd, 25);
|
||||
mtsp(space, SR_USER);
|
||||
mtctl(prot, 8);
|
||||
local_irq_restore(flags);
|
||||
|
||||
flush_dcache_page_asm(__pa(vaddr), vaddr);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static void flush_kernel_icache_page_addr(const void *addr)
|
||||
{
|
||||
unsigned long vaddr = (unsigned long)addr;
|
||||
unsigned long flags;
|
||||
|
||||
/* Purge TLB entry to remove translation on all CPUs */
|
||||
purge_tlb_start(flags);
|
||||
pdtlb(SR_KERNEL, addr);
|
||||
purge_tlb_end(flags);
|
||||
|
||||
/* Use tmpalias flush to prevent instruction cache move-in */
|
||||
preempt_disable();
|
||||
flush_icache_page_asm(__pa(vaddr), vaddr);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void kunmap_flush_on_unmap(const void *addr)
|
||||
{
|
||||
flush_kernel_dcache_page_addr(addr);
|
||||
}
|
||||
EXPORT_SYMBOL(kunmap_flush_on_unmap);
|
||||
|
||||
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned int nr)
|
||||
{
|
||||
@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
|
||||
|
||||
for (;;) {
|
||||
flush_kernel_dcache_page_addr(kaddr);
|
||||
flush_kernel_icache_page(kaddr);
|
||||
flush_kernel_icache_page_addr(kaddr);
|
||||
if (--nr == 0)
|
||||
break;
|
||||
kaddr += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk page directory for MM to find PTEP pointer for address ADDR.
|
||||
*/
|
||||
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
pte_t *ptep = NULL;
|
||||
@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
|
||||
== (_PAGE_PRESENT | _PAGE_ACCESSED);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return user physical address. Returns 0 if page is not present.
|
||||
*/
|
||||
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
unsigned long flags, space, pgd, prot, pa;
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
unsigned long pgd_lock;
|
||||
#endif
|
||||
|
||||
/* Save context */
|
||||
local_irq_save(flags);
|
||||
prot = mfctl(8);
|
||||
space = mfsp(SR_USER);
|
||||
pgd = mfctl(25);
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
pgd_lock = mfctl(28);
|
||||
#endif
|
||||
|
||||
/* Set context for lpa_user */
|
||||
switch_mm_irqs_off(NULL, mm, NULL);
|
||||
pa = lpa_user(addr);
|
||||
|
||||
/* Restore previous context */
|
||||
#ifdef CONFIG_TLB_PTLOCK
|
||||
mtctl(pgd_lock, 28);
|
||||
#endif
|
||||
mtctl(pgd, 25);
|
||||
mtsp(space, SR_USER);
|
||||
mtctl(prot, 8);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return pa;
|
||||
}
|
||||
|
||||
void flush_dcache_folio(struct folio *folio)
|
||||
{
|
||||
struct address_space *mapping = folio_flush_mapping(folio);
|
||||
@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
|
||||
if (addr + nr * PAGE_SIZE > vma->vm_end)
|
||||
nr = (vma->vm_end - addr) / PAGE_SIZE;
|
||||
|
||||
if (parisc_requires_coherency()) {
|
||||
for (i = 0; i < nr; i++) {
|
||||
pte_t *ptep = get_ptep(vma->vm_mm,
|
||||
addr + i * PAGE_SIZE);
|
||||
if (!ptep)
|
||||
continue;
|
||||
if (pte_needs_flush(*ptep))
|
||||
flush_user_cache_page(vma,
|
||||
addr + i * PAGE_SIZE);
|
||||
/* Optimise accesses to the same table? */
|
||||
pte_unmap(ptep);
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* The TLB is the engine of coherence on parisc:
|
||||
* The CPU is entitled to speculate any page
|
||||
* with a TLB mapping, so here we kill the
|
||||
* mapping then flush the page along a special
|
||||
* flush only alias mapping. This guarantees that
|
||||
* the page is no-longer in the cache for any
|
||||
* process and nor may it be speculatively read
|
||||
* in (until the user or kernel specifically
|
||||
* accesses it, of course)
|
||||
*/
|
||||
for (i = 0; i < nr; i++)
|
||||
flush_tlb_page(vma, addr + i * PAGE_SIZE);
|
||||
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
|
||||
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
|
||||
!= (addr & (SHM_COLOUR - 1))) {
|
||||
for (i = 0; i < nr; i++)
|
||||
__flush_cache_page(vma,
|
||||
addr + i * PAGE_SIZE,
|
||||
(pfn + i) * PAGE_SIZE);
|
||||
/*
|
||||
* Software is allowed to have any number
|
||||
* of private mappings to a page.
|
||||
*/
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
continue;
|
||||
if (old_addr)
|
||||
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
|
||||
old_addr, addr, vma->vm_file);
|
||||
if (nr == folio_nr_pages(folio))
|
||||
old_addr = addr;
|
||||
}
|
||||
for (i = 0; i < nr; i++)
|
||||
__flush_cache_page(vma,
|
||||
addr + i * PAGE_SIZE,
|
||||
(pfn + i) * PAGE_SIZE);
|
||||
/*
|
||||
* Software is allowed to have any number
|
||||
* of private mappings to a page.
|
||||
*/
|
||||
if (!(vma->vm_flags & VM_SHARED))
|
||||
continue;
|
||||
if (old_addr)
|
||||
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
|
||||
old_addr, addr, vma->vm_file);
|
||||
if (nr == folio_nr_pages(folio))
|
||||
old_addr = addr;
|
||||
}
|
||||
WARN_ON(++count == 4096);
|
||||
}
|
||||
@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
|
||||
extern void clear_user_page_asm(void *, unsigned long);
|
||||
extern void copy_user_page_asm(void *, void *, unsigned long);
|
||||
|
||||
void flush_kernel_dcache_page_addr(const void *addr)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
flush_kernel_dcache_page_asm(addr);
|
||||
purge_tlb_start(flags);
|
||||
pdtlb(SR_KERNEL, addr);
|
||||
purge_tlb_end(flags);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
|
||||
|
||||
static void flush_cache_page_if_present(struct vm_area_struct *vma,
|
||||
unsigned long vmaddr, unsigned long pfn)
|
||||
unsigned long vmaddr)
|
||||
{
|
||||
#if CONFIG_FLUSH_PAGE_ACCESSED
|
||||
bool needs_flush = false;
|
||||
pte_t *ptep;
|
||||
pte_t *ptep, pte;
|
||||
|
||||
/*
|
||||
* The pte check is racy and sometimes the flush will trigger
|
||||
* a non-access TLB miss. Hopefully, the page has already been
|
||||
* flushed.
|
||||
*/
|
||||
ptep = get_ptep(vma->vm_mm, vmaddr);
|
||||
if (ptep) {
|
||||
needs_flush = pte_needs_flush(*ptep);
|
||||
pte = ptep_get(ptep);
|
||||
needs_flush = pte_needs_flush(pte);
|
||||
pte_unmap(ptep);
|
||||
}
|
||||
if (needs_flush)
|
||||
flush_cache_page(vma, vmaddr, pfn);
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
|
||||
#else
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
unsigned long physaddr = get_upa(mm, vmaddr);
|
||||
|
||||
if (physaddr)
|
||||
__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
|
||||
#endif
|
||||
}
|
||||
|
||||
void copy_user_highpage(struct page *to, struct page *from,
|
||||
@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
|
||||
|
||||
kfrom = kmap_local_page(from);
|
||||
kto = kmap_local_page(to);
|
||||
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
|
||||
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
|
||||
copy_page_asm(kto, kfrom);
|
||||
kunmap_local(kto);
|
||||
kunmap_local(kfrom);
|
||||
@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long user_vaddr, void *dst, void *src, int len)
|
||||
{
|
||||
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
|
||||
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
|
||||
memcpy(dst, src, len);
|
||||
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
|
||||
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
|
||||
}
|
||||
|
||||
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long user_vaddr, void *dst, void *src, int len)
|
||||
{
|
||||
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
|
||||
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
|
||||
memcpy(dst, src, len);
|
||||
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
|
||||
}
|
||||
|
||||
/* __flush_tlb_range()
|
||||
@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
|
||||
|
||||
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long addr, pfn;
|
||||
pte_t *ptep;
|
||||
unsigned long addr;
|
||||
|
||||
for (addr = start; addr < end; addr += PAGE_SIZE) {
|
||||
bool needs_flush = false;
|
||||
/*
|
||||
* The vma can contain pages that aren't present. Although
|
||||
* the pte search is expensive, we need the pte to find the
|
||||
* page pfn and to check whether the page should be flushed.
|
||||
*/
|
||||
ptep = get_ptep(vma->vm_mm, addr);
|
||||
if (ptep) {
|
||||
needs_flush = pte_needs_flush(*ptep);
|
||||
pfn = pte_pfn(*ptep);
|
||||
pte_unmap(ptep);
|
||||
}
|
||||
if (needs_flush) {
|
||||
if (parisc_requires_coherency()) {
|
||||
flush_user_cache_page(vma, addr);
|
||||
} else {
|
||||
if (WARN_ON(!pfn_valid(pfn)))
|
||||
return;
|
||||
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
}
|
||||
for (addr = start; addr < end; addr += PAGE_SIZE)
|
||||
flush_cache_page_if_present(vma, addr);
|
||||
}
|
||||
|
||||
static inline unsigned long mm_total_size(struct mm_struct *mm)
|
||||
@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
|
||||
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
|
||||
return;
|
||||
flush_tlb_range(vma, start, end);
|
||||
flush_cache_all();
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
flush_cache_all();
|
||||
else
|
||||
flush_data_cache();
|
||||
return;
|
||||
}
|
||||
|
||||
flush_cache_pages(vma, start, end);
|
||||
flush_cache_pages(vma, start & PAGE_MASK, end);
|
||||
}
|
||||
|
||||
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
|
||||
{
|
||||
if (WARN_ON(!pfn_valid(pfn)))
|
||||
return;
|
||||
if (parisc_requires_coherency())
|
||||
flush_user_cache_page(vma, vmaddr);
|
||||
else
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
}
|
||||
|
||||
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
|
||||
@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
|
||||
if (!PageAnon(page))
|
||||
return;
|
||||
|
||||
if (parisc_requires_coherency()) {
|
||||
if (vma->vm_flags & VM_SHARED)
|
||||
flush_data_cache();
|
||||
else
|
||||
flush_user_cache_page(vma, vmaddr);
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
|
||||
}
|
||||
|
||||
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
{
|
||||
pte_t pte = ptep_get(ptep);
|
||||
|
||||
if (!pte_young(pte))
|
||||
return 0;
|
||||
set_pte(ptep, pte_mkold(pte));
|
||||
#if CONFIG_FLUSH_PAGE_ACCESSED
|
||||
__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
|
||||
#endif
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* After a PTE is cleared, we have no way to flush the cache for
|
||||
* the physical page. On PA8800 and PA8900 processors, these lines
|
||||
* can cause random cache corruption. Thus, we must flush the cache
|
||||
* as well as the TLB when clearing a PTE that's valid.
|
||||
*/
|
||||
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
{
|
||||
struct mm_struct *mm = (vma)->vm_mm;
|
||||
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
|
||||
unsigned long pfn = pte_pfn(pte);
|
||||
|
||||
if (pfn_valid(pfn))
|
||||
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
|
||||
else if (pte_accessible(mm, pte))
|
||||
flush_tlb_page(vma, addr);
|
||||
|
||||
return pte;
|
||||
}
|
||||
|
||||
/*
|
||||
* The physical address for pages in the ioremap case can be obtained
|
||||
* from the vm_struct struct. I wasn't able to successfully handle the
|
||||
* vmalloc and vmap cases. We have an array of struct page pointers in
|
||||
* the uninitialized vmalloc case but the flush failed using page_to_pfn.
|
||||
*/
|
||||
void flush_cache_vmap(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long addr, physaddr;
|
||||
struct vm_struct *vm;
|
||||
|
||||
/* Prevent cache move-in */
|
||||
flush_tlb_kernel_range(start, end);
|
||||
|
||||
if (end - start >= parisc_cache_flush_threshold) {
|
||||
flush_cache_all();
|
||||
return;
|
||||
}
|
||||
|
||||
flush_tlb_page(vma, vmaddr);
|
||||
preempt_disable();
|
||||
flush_dcache_page_asm(page_to_phys(page), vmaddr);
|
||||
preempt_enable();
|
||||
}
|
||||
if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
flush_cache_all();
return;
}

vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
flush_cache_all();
return;
}

/* The physical addresses of IOREMAP regions are contiguous */
if (vm->flags & VM_IOREMAP) {
physaddr = vm->phys_addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
preempt_disable();
flush_dcache_page_asm(physaddr, start);
flush_icache_page_asm(physaddr, start);
preempt_enable();
physaddr += PAGE_SIZE;
}
return;
}

flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
* The vm_struct has been retired and the page table is set up. The
* last page in the range is a guard page. Its physical address can't
* be determined using lpa, so there is no way to flush the range
* using flush_dcache_page_asm.
*/
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);

/*
* On systems with PA8800/PA8900 processors, there is no way to flush
* a vmap range other than using the architected loop to flush the
* entire cache. The page directory is not set up, so we can't use
* fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
* L2 is physically indexed but FDCE/FICE instructions in virtual
* mode output their virtual address on the core bus, not their
* real address. As a result, the L2 cache index formed from the
* virtual address will most likely not be the same as the L2 index
* formed from the real address.
*/
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;

if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);

if (!static_branch_likely(&parisc_has_dcache))
return;

/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}

flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

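Aside (not part of the patch): both flush_kernel_vmap_range() and the invalidate variant below guard the whole-cache fallback with the same condition. A minimal illustrative sketch of that rule, with simplified names assumed for this example:

/* Illustrative sketch only: when a range flush is replaced by a full flush. */
#include <stdbool.h>

static bool whole_dcache_flush_allowed(unsigned long size, unsigned long threshold,
				       bool smp, bool irqs_disabled)
{
	/* A full flush is only worthwhile for large ranges, and on SMP it
	 * needs to reach the other CPUs, which is not possible with
	 * interrupts disabled; fall back to a range flush then. */
	return (!smp || !irqs_disabled) && size >= threshold;
}
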
@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();

if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);

if (!static_branch_likely(&parisc_has_dcache))
return;

/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}

purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

@@ -384,7 +384,7 @@ static void fixup_vmlinux_info(void)
void startup_kernel(void)
{
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
unsigned long nokaslr_offset_phys = mem_safe_offset();
unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
unsigned long amode31_lma = 0;
unsigned long max_physmem_end;
unsigned long asce_limit;
@@ -393,6 +393,12 @@ void startup_kernel(void)

fixup_vmlinux_info();
setup_lpp();

/*
* Non-randomized kernel physical start address must be _SEGMENT_SIZE
* aligned (see below).
*/
nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);

/*
@@ -425,10 +431,25 @@ void startup_kernel(void)
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);

if (kaslr_enabled())
__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
/*
* __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
* 20 bits (the offset within a large page) are zero. Copy the last
* 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
* __kaslr_offset_phys.
*
* With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
* are identical, which is required to allow for large mappings of the
* kernel image.
*/
kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
if (kaslr_enabled()) {
unsigned long end = ident_map_size - kaslr_large_page_offset;

__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
}
if (!__kaslr_offset_phys)
__kaslr_offset_phys = nokaslr_offset_phys;
__kaslr_offset_phys |= kaslr_large_page_offset;
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
deploy_kernel((void *)__kaslr_offset_phys);
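For orientation only, not part of the patch above: a minimal standalone sketch of the offset arithmetic the comment describes, assuming a 1 MB segment (large-page) size and example offset values.

/* Illustrative sketch, not kernel code: keep the low 20 bits of the virtual
 * KASLR offset in the physical one so 1 MB mappings of the image line up. */
#include <stdio.h>

#define SEGMENT_SIZE	(1UL << 20)		/* assumed large-page size */
#define SEGMENT_MASK	(~(SEGMENT_SIZE - 1))

int main(void)
{
	unsigned long kaslr_offset = 0x3d2c4000UL;	/* example: THREAD_SIZE aligned */
	unsigned long kaslr_offset_phys = 0x7ff00000UL;	/* example: _SEGMENT_SIZE aligned */
	unsigned long large_page_offset = kaslr_offset & ~SEGMENT_MASK;

	/* Copy the offset within the large page from the virtual offset. */
	kaslr_offset_phys |= large_page_offset;

	/* Both offsets now share their low 20 bits. */
	printf("low bits: virt=%#lx phys=%#lx\n",
	       kaslr_offset & ~SEGMENT_MASK, kaslr_offset_phys & ~SEGMENT_MASK);
	return 0;
}
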
@@ -261,21 +261,27 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m

static bool large_allowed(enum populate_mode mode)
{
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;

return machine.has_edat2 && large_allowed(mode) &&
IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
unsigned long size = end - addr;

return machine.has_edat1 && large_allowed(mode) &&
IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}

static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
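As a side note (not part of the patch): the new checks mean a large block mapping is only used when the remaining range covers a whole block and both the virtual address and the physical address it maps to are block aligned. A minimal sketch of that rule, with simplified names assumed for this example:

/* Illustrative sketch only; names simplified from the code above. */
#include <stdbool.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static bool can_map_large_block(unsigned long vaddr, unsigned long paddr,
				unsigned long size, unsigned long block_size)
{
	/* Hardware large pages require both sides of the mapping to start on
	 * a block boundary, not just the virtual side. */
	return size >= block_size &&
	       IS_ALIGNED(vaddr, block_size) &&
	       IS_ALIGNED(paddr, block_size);
}
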
@@ -109,6 +109,7 @@ SECTIONS
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = ALIGN(PAGE_SIZE);
. += AMODE31_SIZE; /* .amode31 section */
. = ALIGN(1 << 20); /* _SEGMENT_SIZE */
#else
. = ALIGN(8);
#endif
@ -43,7 +43,6 @@ CONFIG_PROFILING=y
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_KEXEC_FILE=y
|
||||
CONFIG_KEXEC_SIG=y
|
||||
CONFIG_CRASH_DUMP=y
|
||||
CONFIG_LIVEPATCH=y
|
||||
CONFIG_MARCH_Z13=y
|
||||
CONFIG_NR_CPUS=512
|
||||
@ -51,6 +50,7 @@ CONFIG_NUMA=y
|
||||
CONFIG_HZ_100=y
|
||||
CONFIG_CERT_STORE=y
|
||||
CONFIG_EXPOLINE=y
|
||||
# CONFIG_EXPOLINE_EXTERN is not set
|
||||
CONFIG_EXPOLINE_AUTO=y
|
||||
CONFIG_CHSC_SCH=y
|
||||
CONFIG_VFIO_CCW=m
|
||||
@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
|
||||
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
|
||||
CONFIG_MODVERSIONS=y
|
||||
CONFIG_MODULE_SRCVERSION_ALL=y
|
||||
CONFIG_MODULE_SIG_SHA256=y
|
||||
CONFIG_BLK_DEV_THROTTLING=y
|
||||
CONFIG_BLK_WBT=y
|
||||
CONFIG_BLK_CGROUP_IOLATENCY=y
|
||||
@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y
|
||||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CMA_DEBUG=y
|
||||
CONFIG_CMA_DEBUGFS=y
|
||||
CONFIG_CMA_SYSFS=y
|
||||
CONFIG_CMA_AREAS=7
|
||||
@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m
|
||||
CONFIG_XFRM_USER=m
|
||||
CONFIG_NET_KEY=m
|
||||
CONFIG_SMC_DIAG=m
|
||||
CONFIG_SMC_LO=y
|
||||
CONFIG_INET=y
|
||||
CONFIG_IP_MULTICAST=y
|
||||
CONFIG_IP_ADVANCED_ROUTER=y
|
||||
@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y
|
||||
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
|
||||
CONFIG_IP_PIMSM_V1=y
|
||||
CONFIG_IP_PIMSM_V2=y
|
||||
CONFIG_SYN_COOKIES=y
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m
|
||||
CONFIG_NETFILTER_NETLINK_HOOK=m
|
||||
CONFIG_NF_CONNTRACK=m
|
||||
CONFIG_NF_CONNTRACK_SECMARK=y
|
||||
CONFIG_NF_CONNTRACK_ZONES=y
|
||||
CONFIG_NF_CONNTRACK_PROCFS=y
|
||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
||||
CONFIG_NF_CONNTRACK_TIMEOUT=y
|
||||
@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m
|
||||
CONFIG_NF_CONNTRACK_TFTP=m
|
||||
CONFIG_NF_CT_NETLINK=m
|
||||
CONFIG_NF_CT_NETLINK_TIMEOUT=m
|
||||
CONFIG_NF_CT_NETLINK_HELPER=m
|
||||
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
|
||||
CONFIG_NF_TABLES=m
|
||||
CONFIG_NF_TABLES_INET=y
|
||||
CONFIG_NF_TABLES_NETDEV=y
|
||||
CONFIG_NFT_NUMGEN=m
|
||||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_FLOW_OFFLOAD=m
|
||||
CONFIG_NFT_CONNLIMIT=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
CONFIG_NFT_MASQ=m
|
||||
CONFIG_NFT_REDIR=m
|
||||
CONFIG_NFT_NAT=m
|
||||
CONFIG_NFT_TUNNEL=m
|
||||
CONFIG_NFT_QUEUE=m
|
||||
CONFIG_NFT_QUOTA=m
|
||||
CONFIG_NFT_REJECT=m
|
||||
CONFIG_NFT_COMPAT=m
|
||||
CONFIG_NFT_HASH=m
|
||||
CONFIG_NFT_FIB_INET=m
|
||||
CONFIG_NETFILTER_XTABLES_COMPAT=y
|
||||
CONFIG_NFT_XFRM=m
|
||||
CONFIG_NFT_SOCKET=m
|
||||
CONFIG_NFT_OSF=m
|
||||
CONFIG_NFT_TPROXY=m
|
||||
CONFIG_NFT_SYNPROXY=m
|
||||
CONFIG_NFT_DUP_NETDEV=m
|
||||
CONFIG_NFT_FWD_NETDEV=m
|
||||
CONFIG_NFT_FIB_NETDEV=m
|
||||
CONFIG_NFT_REJECT_NETDEV=m
|
||||
CONFIG_NF_FLOW_TABLE_INET=m
|
||||
CONFIG_NF_FLOW_TABLE=m
|
||||
CONFIG_NF_FLOW_TABLE_PROCFS=y
|
||||
CONFIG_NETFILTER_XT_SET=m
|
||||
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
|
||||
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
|
||||
@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
|
||||
CONFIG_NETFILTER_XT_TARGET_LOG=m
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
|
||||
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TEE=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TRACE=m
|
||||
@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_BPF=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
|
||||
@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_ESP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_HELPER=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPVS=m
|
||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
|
||||
@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
|
||||
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
|
||||
CONFIG_NETFILTER_XT_MATCH_REALM=m
|
||||
CONFIG_NETFILTER_XT_MATCH_RECENT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STATE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STRING=m
|
||||
@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m
|
||||
CONFIG_IP_NF_TARGET_TTL=m
|
||||
CONFIG_IP_NF_RAW=m
|
||||
CONFIG_IP_NF_SECURITY=m
|
||||
CONFIG_IP_NF_ARPTABLES=m
|
||||
CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NFT_FIB_IPV6=m
|
||||
@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m
|
||||
CONFIG_NET_ACT_GACT=m
|
||||
CONFIG_GACT_PROB=y
|
||||
CONFIG_NET_ACT_MIRRED=m
|
||||
CONFIG_NET_ACT_IPT=m
|
||||
CONFIG_NET_ACT_NAT=m
|
||||
CONFIG_NET_ACT_PEDIT=m
|
||||
CONFIG_NET_ACT_SIMP=m
|
||||
@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m
|
||||
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
|
||||
CONFIG_DM_SWITCH=m
|
||||
CONFIG_DM_INTEGRITY=m
|
||||
CONFIG_DM_VDO=m
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_BONDING=m
|
||||
CONFIG_DUMMY=m
|
||||
@ -574,7 +601,6 @@ CONFIG_WATCHDOG=y
|
||||
CONFIG_WATCHDOG_NOWAYOUT=y
|
||||
CONFIG_SOFT_WATCHDOG=m
|
||||
CONFIG_DIAG288_WATCHDOG=m
|
||||
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
|
||||
CONFIG_FB=y
|
||||
# CONFIG_FB_DEVICE is not set
|
||||
CONFIG_FRAMEBUFFER_CONSOLE=y
|
||||
@ -645,7 +671,6 @@ CONFIG_MSDOS_FS=m
|
||||
CONFIG_VFAT_FS=m
|
||||
CONFIG_EXFAT_FS=m
|
||||
CONFIG_NTFS_FS=m
|
||||
CONFIG_NTFS_RW=y
|
||||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
@ -663,6 +688,7 @@ CONFIG_SQUASHFS_XZ=y
|
||||
CONFIG_SQUASHFS_ZSTD=y
|
||||
CONFIG_ROMFS_FS=m
|
||||
CONFIG_NFS_FS=m
|
||||
CONFIG_NFS_V2=m
|
||||
CONFIG_NFS_V3_ACL=y
|
||||
CONFIG_NFS_V4=m
|
||||
CONFIG_NFS_SWAP=y
|
||||
@ -879,6 +905,5 @@ CONFIG_RBTREE_TEST=y
|
||||
CONFIG_INTERVAL_TREE_TEST=m
|
||||
CONFIG_PERCPU_TEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
CONFIG_STRING_SELFTEST=y
|
||||
CONFIG_TEST_BITOPS=m
|
||||
CONFIG_TEST_BPF=m
|
||||
|
@ -41,7 +41,6 @@ CONFIG_PROFILING=y
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_KEXEC_FILE=y
|
||||
CONFIG_KEXEC_SIG=y
|
||||
CONFIG_CRASH_DUMP=y
|
||||
CONFIG_LIVEPATCH=y
|
||||
CONFIG_MARCH_Z13=y
|
||||
CONFIG_NR_CPUS=512
|
||||
@ -49,6 +48,7 @@ CONFIG_NUMA=y
|
||||
CONFIG_HZ_100=y
|
||||
CONFIG_CERT_STORE=y
|
||||
CONFIG_EXPOLINE=y
|
||||
# CONFIG_EXPOLINE_EXTERN is not set
|
||||
CONFIG_EXPOLINE_AUTO=y
|
||||
CONFIG_CHSC_SCH=y
|
||||
CONFIG_VFIO_CCW=m
|
||||
@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
|
||||
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
|
||||
CONFIG_MODVERSIONS=y
|
||||
CONFIG_MODULE_SRCVERSION_ALL=y
|
||||
CONFIG_MODULE_SIG_SHA256=y
|
||||
CONFIG_BLK_DEV_THROTTLING=y
|
||||
CONFIG_BLK_WBT=y
|
||||
CONFIG_BLK_CGROUP_IOLATENCY=y
|
||||
@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m
|
||||
CONFIG_XFRM_USER=m
|
||||
CONFIG_NET_KEY=m
|
||||
CONFIG_SMC_DIAG=m
|
||||
CONFIG_SMC_LO=y
|
||||
CONFIG_INET=y
|
||||
CONFIG_IP_MULTICAST=y
|
||||
CONFIG_IP_ADVANCED_ROUTER=y
|
||||
@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y
|
||||
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
|
||||
CONFIG_IP_PIMSM_V1=y
|
||||
CONFIG_IP_PIMSM_V2=y
|
||||
CONFIG_SYN_COOKIES=y
|
||||
CONFIG_NET_IPVTI=m
|
||||
CONFIG_INET_AH=m
|
||||
CONFIG_INET_ESP=m
|
||||
@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
|
||||
CONFIG_NETFILTER_NETLINK_HOOK=m
|
||||
CONFIG_NF_CONNTRACK=m
|
||||
CONFIG_NF_CONNTRACK_SECMARK=y
|
||||
CONFIG_NF_CONNTRACK_ZONES=y
|
||||
CONFIG_NF_CONNTRACK_PROCFS=y
|
||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
||||
CONFIG_NF_CONNTRACK_TIMEOUT=y
|
||||
@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m
|
||||
CONFIG_NF_CONNTRACK_TFTP=m
|
||||
CONFIG_NF_CT_NETLINK=m
|
||||
CONFIG_NF_CT_NETLINK_TIMEOUT=m
|
||||
CONFIG_NF_CT_NETLINK_HELPER=m
|
||||
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
|
||||
CONFIG_NF_TABLES=m
|
||||
CONFIG_NF_TABLES_INET=y
|
||||
CONFIG_NF_TABLES_NETDEV=y
|
||||
CONFIG_NFT_NUMGEN=m
|
||||
CONFIG_NFT_CT=m
|
||||
CONFIG_NFT_FLOW_OFFLOAD=m
|
||||
CONFIG_NFT_CONNLIMIT=m
|
||||
CONFIG_NFT_LOG=m
|
||||
CONFIG_NFT_LIMIT=m
|
||||
CONFIG_NFT_MASQ=m
|
||||
CONFIG_NFT_REDIR=m
|
||||
CONFIG_NFT_NAT=m
|
||||
CONFIG_NFT_TUNNEL=m
|
||||
CONFIG_NFT_QUEUE=m
|
||||
CONFIG_NFT_QUOTA=m
|
||||
CONFIG_NFT_REJECT=m
|
||||
CONFIG_NFT_COMPAT=m
|
||||
CONFIG_NFT_HASH=m
|
||||
CONFIG_NFT_FIB_INET=m
|
||||
CONFIG_NETFILTER_XTABLES_COMPAT=y
|
||||
CONFIG_NFT_XFRM=m
|
||||
CONFIG_NFT_SOCKET=m
|
||||
CONFIG_NFT_OSF=m
|
||||
CONFIG_NFT_TPROXY=m
|
||||
CONFIG_NFT_SYNPROXY=m
|
||||
CONFIG_NFT_DUP_NETDEV=m
|
||||
CONFIG_NFT_FWD_NETDEV=m
|
||||
CONFIG_NFT_FIB_NETDEV=m
|
||||
CONFIG_NFT_REJECT_NETDEV=m
|
||||
CONFIG_NF_FLOW_TABLE_INET=m
|
||||
CONFIG_NF_FLOW_TABLE=m
|
||||
CONFIG_NF_FLOW_TABLE_PROCFS=y
|
||||
CONFIG_NETFILTER_XT_SET=m
|
||||
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
|
||||
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
|
||||
@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
|
||||
CONFIG_NETFILTER_XT_TARGET_LOG=m
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NETMAP=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
|
||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
|
||||
CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TEE=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TRACE=m
|
||||
@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
|
||||
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_BPF=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
|
||||
@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_ESP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_HELPER=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_IPVS=m
|
||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
|
||||
@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
|
||||
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
|
||||
CONFIG_NETFILTER_XT_MATCH_REALM=m
|
||||
CONFIG_NETFILTER_XT_MATCH_RECENT=m
|
||||
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STATE=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
|
||||
CONFIG_NETFILTER_XT_MATCH_STRING=m
|
||||
@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m
|
||||
CONFIG_IP_NF_TARGET_TTL=m
|
||||
CONFIG_IP_NF_RAW=m
|
||||
CONFIG_IP_NF_SECURITY=m
|
||||
CONFIG_IP_NF_ARPTABLES=m
|
||||
CONFIG_IP_NF_ARPFILTER=m
|
||||
CONFIG_IP_NF_ARP_MANGLE=m
|
||||
CONFIG_NFT_FIB_IPV6=m
|
||||
@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m
|
||||
CONFIG_NET_ACT_GACT=m
|
||||
CONFIG_GACT_PROB=y
|
||||
CONFIG_NET_ACT_MIRRED=m
|
||||
CONFIG_NET_ACT_IPT=m
|
||||
CONFIG_NET_ACT_NAT=m
|
||||
CONFIG_NET_ACT_PEDIT=m
|
||||
CONFIG_NET_ACT_SIMP=m
|
||||
@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m
|
||||
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
|
||||
CONFIG_DM_SWITCH=m
|
||||
CONFIG_DM_INTEGRITY=m
|
||||
CONFIG_DM_VDO=m
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_BONDING=m
|
||||
CONFIG_DUMMY=m
|
||||
@ -630,7 +658,6 @@ CONFIG_MSDOS_FS=m
|
||||
CONFIG_VFAT_FS=m
|
||||
CONFIG_EXFAT_FS=m
|
||||
CONFIG_NTFS_FS=m
|
||||
CONFIG_NTFS_RW=y
|
||||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_TMPFS_POSIX_ACL=y
|
||||
@ -649,6 +676,7 @@ CONFIG_SQUASHFS_XZ=y
|
||||
CONFIG_SQUASHFS_ZSTD=y
|
||||
CONFIG_ROMFS_FS=m
|
||||
CONFIG_NFS_FS=m
|
||||
CONFIG_NFS_V2=m
|
||||
CONFIG_NFS_V3_ACL=y
|
||||
CONFIG_NFS_V4=m
|
||||
CONFIG_NFS_SWAP=y
|
||||
|
@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
|
||||
CONFIG_KEXEC=y
|
||||
CONFIG_CRASH_DUMP=y
|
||||
CONFIG_MARCH_Z13=y
|
||||
CONFIG_NR_CPUS=2
|
||||
CONFIG_HZ_100=y
|
||||
# CONFIG_CHSC_SCH is not set
|
||||
# CONFIG_SCM_BUS is not set
|
||||
# CONFIG_AP is not set
|
||||
# CONFIG_PFAULT is not set
|
||||
# CONFIG_S390_HYPFS is not set
|
||||
# CONFIG_VIRTUALIZATION is not set
|
||||
# CONFIG_S390_GUEST is not set
|
||||
# CONFIG_SECCOMP is not set
|
||||
# CONFIG_GCC_PLUGINS is not set
|
||||
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
|
||||
CONFIG_PARTITION_ADVANCED=y
|
||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
||||
# CONFIG_SWAP is not set
|
||||
# CONFIG_COMPAT_BRK is not set
|
||||
# CONFIG_COMPACTION is not set
|
||||
# CONFIG_MIGRATION is not set
|
||||
CONFIG_NET=y
|
||||
# CONFIG_IUCV is not set
|
||||
# CONFIG_PCPU_DEV_REFCNT is not set
|
||||
|
@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
|
||||
|
||||
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
|
||||
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
|
||||
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
|
||||
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
|
||||
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
|
||||
$(call if_changed,ld)
|
||||
|
||||
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
|
||||
|
@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
|
||||
struct efi_memory_map_data *data);
|
||||
extern void __efi_memmap_free(u64 phys, unsigned long size,
|
||||
unsigned long flags);
|
||||
#define __efi_memmap_free __efi_memmap_free
|
||||
|
||||
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
|
||||
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
|
||||
|
@ -78,10 +78,10 @@ extern int __get_user_bad(void);
|
||||
int __ret_gu; \
|
||||
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
|
||||
__chk_user_ptr(ptr); \
|
||||
asm volatile("call __" #fn "_%c4" \
|
||||
asm volatile("call __" #fn "_%c[size]" \
|
||||
: "=a" (__ret_gu), "=r" (__val_gu), \
|
||||
ASM_CALL_CONSTRAINT \
|
||||
: "0" (ptr), "i" (sizeof(*(ptr)))); \
|
||||
: "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
|
||||
instrument_get_user(__val_gu); \
|
||||
(x) = (__force __typeof__(*(ptr))) __val_gu; \
|
||||
__builtin_expect(__ret_gu, 0); \
|
||||
|
@ -44,7 +44,11 @@
|
||||
or %rdx, %rax
|
||||
.else
|
||||
cmp $TASK_SIZE_MAX-\size+1, %eax
|
||||
.if \size != 8
|
||||
jae .Lbad_get_user
|
||||
.else
|
||||
jae .Lbad_get_user_8
|
||||
.endif
|
||||
sbb %edx, %edx /* array_index_mask_nospec() */
|
||||
and %edx, %eax
|
||||
.endif
|
||||
@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
|
||||
#ifdef CONFIG_X86_32
|
||||
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
|
||||
ASM_CLAC
|
||||
bad_get_user_8:
|
||||
.Lbad_get_user_8:
|
||||
xor %edx,%edx
|
||||
xor %ecx,%ecx
|
||||
mov $(-EFAULT),%_ASM_AX
|
||||
|
@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
|
||||
*/
|
||||
int __init efi_memmap_install(struct efi_memory_map_data *data)
|
||||
{
|
||||
unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
|
||||
unsigned long flags = efi.memmap.flags;
|
||||
u64 phys = efi.memmap.phys_map;
|
||||
int ret;
|
||||
|
||||
efi_memmap_unmap();
|
||||
|
||||
if (efi_enabled(EFI_PARAVIRT))
|
||||
return 0;
|
||||
|
||||
return __efi_memmap_init(data);
|
||||
ret = __efi_memmap_init(data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
__efi_memmap_free(phys, size, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -144,16 +144,38 @@ void bio_integrity_free(struct bio *bio)
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
struct bio_set *bs = bio->bi_pool;
|
||||
|
||||
if (bip->bip_flags & BIP_INTEGRITY_USER)
|
||||
return;
|
||||
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
|
||||
kfree(bvec_virt(bip->bip_vec));
|
||||
else if (bip->bip_flags & BIP_INTEGRITY_USER)
|
||||
bio_integrity_unmap_user(bip);
|
||||
|
||||
__bio_integrity_free(bs, bip);
|
||||
bio->bi_integrity = NULL;
|
||||
bio->bi_opf &= ~REQ_INTEGRITY;
|
||||
}
|
||||
|
||||
/**
|
||||
* bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
|
||||
* @bio: bio containing bip to be unmapped and freed
|
||||
*
|
||||
* Description: Used to unmap and free the user mapped integrity portion of a
|
||||
* bio. Submitter attaching the user integrity buffer is responsible for
|
||||
* unmapping and freeing it during completion.
|
||||
*/
|
||||
void bio_integrity_unmap_free_user(struct bio *bio)
|
||||
{
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
struct bio_set *bs = bio->bi_pool;
|
||||
|
||||
if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
|
||||
return;
|
||||
bio_integrity_unmap_user(bip);
|
||||
__bio_integrity_free(bs, bip);
|
||||
bio->bi_integrity = NULL;
|
||||
bio->bi_opf &= ~REQ_INTEGRITY;
|
||||
}
|
||||
EXPORT_SYMBOL(bio_integrity_unmap_free_user);
|
||||
|
||||
/**
|
||||
* bio_integrity_add_page - Attach integrity metadata
|
||||
* @bio: bio to update
|
||||
|
@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
|
||||
/* queue for flush */
|
||||
if (list_empty(pending))
|
||||
fq->flush_pending_since = jiffies;
|
||||
list_move_tail(&rq->queuelist, pending);
|
||||
list_add_tail(&rq->queuelist, pending);
|
||||
break;
|
||||
|
||||
case REQ_FSEQ_DATA:
|
||||
@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
|
||||
unsigned int seq = blk_flush_cur_seq(rq);
|
||||
|
||||
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
|
||||
list_del_init(&rq->queuelist);
|
||||
blk_flush_complete_seq(rq, fq, seq, error);
|
||||
}
|
||||
|
||||
|
@ -1552,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
|
||||
|
||||
void disk_free_zone_resources(struct gendisk *disk)
|
||||
{
|
||||
if (!disk->zone_wplugs_pool)
|
||||
return;
|
||||
|
||||
cancel_work_sync(&disk->zone_wplugs_work);
|
||||
|
||||
if (disk->zone_wplugs_wq) {
|
||||
|
@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
|
||||
&key_type_user, key_name, true);
|
||||
|
||||
if (IS_ERR(kref))
|
||||
ret = PTR_ERR(kref);
|
||||
return PTR_ERR(kref);
|
||||
|
||||
key = key_ref_to_ptr(kref);
|
||||
down_read(&key->sem);
|
||||
|
@ -191,6 +191,10 @@ void
|
||||
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
|
||||
acpi_adr_space_type space_id, u32 function);
|
||||
|
||||
void
|
||||
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
|
||||
acpi_adr_space_type space_id);
|
||||
|
||||
acpi_status
|
||||
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
|
||||
|
||||
|
@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
|
||||
|
||||
/* Local prototypes */
|
||||
|
||||
static void
|
||||
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
|
||||
acpi_adr_space_type space_id);
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_reg_run(acpi_handle obj_handle,
|
||||
u32 level, void *context, void **return_value);
|
||||
@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
void
|
||||
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
|
||||
acpi_adr_space_type space_id)
|
||||
{
|
||||
|
@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_execute_orphan_reg_method
|
||||
*
|
||||
* PARAMETERS: device - Handle for the device
|
||||
* space_id - The address space ID
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
|
||||
* device. This is a _REG method that has no corresponding region
|
||||
* within the device's scope.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
|
||||
{
|
||||
struct acpi_namespace_node *node;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
|
||||
|
||||
/* Parameter validation */
|
||||
|
||||
if (!device) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Convert and validate the device handle */
|
||||
|
||||
node = acpi_ns_validate_handle(device);
|
||||
if (node) {
|
||||
|
||||
/*
|
||||
* If an "orphan" _REG method is present in the device's scope
|
||||
* for the given address space ID, run it.
|
||||
*/
|
||||
|
||||
acpi_ev_execute_orphan_reg_method(node, space_id);
|
||||
} else {
|
||||
status = AE_BAD_PARAMETER;
|
||||
}
|
||||
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
|
||||
|
@ -1507,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
|
||||
|
||||
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
|
||||
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
|
||||
if (scope_handle != ec->handle)
|
||||
acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
|
||||
|
||||
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
|
||||
}
|
||||
|
||||
|
@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
|
||||
|
||||
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
|
||||
{
|
||||
int temp;
|
||||
|
||||
if (temp_deci_k == THERMAL_TEMP_INVALID)
|
||||
return THERMAL_TEMP_INVALID;
|
||||
|
||||
return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
|
||||
temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
|
||||
tz->kelvin_offset);
|
||||
if (temp <= 0)
|
||||
return THERMAL_TEMP_INVALID;
|
||||
|
||||
return temp;
|
||||
}
|
||||
|
||||
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
|
||||
|
@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
|
||||
}
|
||||
|
||||
/*
|
||||
* AMD systems from Renoir and Lucienne *require* that the NVME controller
|
||||
* AMD systems from Renoir onwards *require* that the NVME controller
|
||||
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
|
||||
*
|
||||
* This is "typically" accomplished using the `StorageD3Enable`
|
||||
* property in the _DSD that is checked via the `acpi_storage_d3` function
|
||||
* but this property was introduced after many of these systems launched
|
||||
* and most OEM systems don't have it in their BIOS.
|
||||
* but some OEM systems still don't have it in their BIOS.
|
||||
*
|
||||
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
|
||||
* a hardcoded allowlist for D3 support, which was used for these platforms.
|
||||
* a hardcoded allowlist for D3 support as well as a registry key to override
|
||||
* the BIOS, which has been used for these cases.
|
||||
*
|
||||
* This allows quirking on Linux in a similar fashion.
|
||||
*
|
||||
@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
|
||||
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
|
||||
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
|
||||
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
|
||||
disk in the system.
|
||||
* disk in the system.
|
||||
* 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
|
||||
* NVME device.
|
||||
*/
|
||||
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
|
||||
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
|
||||
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
|
||||
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
|
||||
X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
|
||||
{}
|
||||
};
|
||||
|
||||
bool force_storage_d3(void)
|
||||
{
|
||||
return x86_match_cpu(storage_d3_cpu_ids);
|
||||
if (!cpu_feature_enabled(X86_FEATURE_ZEN))
|
||||
return false;
|
||||
return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1831,11 +1831,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
|
||||
2
|
||||
};
|
||||
|
||||
/* set scsi removable (RMB) bit per ata bit, or if the
|
||||
* AHCI port says it's external (Hotplug-capable, eSATA).
|
||||
/*
|
||||
* Set the SCSI Removable Media Bit (RMB) if the ATA removable media
|
||||
* device bit (obsolete since ATA-8 ACS) is set.
|
||||
*/
|
||||
if (ata_id_removable(args->id) ||
|
||||
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
|
||||
if (ata_id_removable(args->id))
|
||||
hdr[1] |= (1 << 7);
|
||||
|
||||
if (args->dev->class == ATA_DEV_ZAC) {
|
||||
|
@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
|
||||
if (!env)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Synchronize with really_probe() */
|
||||
device_lock(dev);
|
||||
/* let the kset specific function add its keys */
|
||||
retval = kset->uevent_ops->uevent(&dev->kobj, env);
|
||||
device_unlock(dev);
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
@ -2845,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
|
||||
sysfs_remove_group(&dev->kobj, group);
|
||||
}
|
||||
|
||||
static void devm_attr_groups_remove(struct device *dev, void *res)
|
||||
{
|
||||
union device_attr_group_devres *devres = res;
|
||||
const struct attribute_group **groups = devres->groups;
|
||||
|
||||
dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
|
||||
sysfs_remove_groups(&dev->kobj, groups);
|
||||
}
|
||||
|
||||
/**
|
||||
* devm_device_add_group - given a device, create a managed attribute group
|
||||
* @dev: The device to create the group for
|
||||
@ -2886,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_add_group);
|
||||
|
||||
/**
|
||||
* devm_device_add_groups - create a bunch of managed attribute groups
|
||||
* @dev: The device to create the group for
|
||||
* @groups: The attribute groups to create, NULL terminated
|
||||
*
|
||||
* This function creates a bunch of managed attribute groups. If an error
|
||||
* occurs when creating a group, all previously created groups will be
|
||||
* removed, unwinding everything back to the original state when this
|
||||
* function was called. It will explicitly warn and error if any of the
|
||||
* attribute files being created already exist.
|
||||
*
|
||||
* Returns 0 on success or error code from sysfs_create_group on failure.
|
||||
*/
|
||||
int devm_device_add_groups(struct device *dev,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
union device_attr_group_devres *devres;
|
||||
int error;
|
||||
|
||||
devres = devres_alloc(devm_attr_groups_remove,
|
||||
sizeof(*devres), GFP_KERNEL);
|
||||
if (!devres)
|
||||
return -ENOMEM;
|
||||
|
||||
error = sysfs_create_groups(&dev->kobj, groups);
|
||||
if (error) {
|
||||
devres_free(devres);
|
||||
return error;
|
||||
}
|
||||
|
||||
devres->groups = groups;
|
||||
devres_add(dev, devres);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_add_groups);
|
||||
|
||||
static int device_add_attrs(struct device *dev)
|
||||
{
|
||||
const struct class *class = dev->class;
|
||||
|
@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void loop_clear_limits(struct loop_device *lo, int mode)
|
||||
{
|
||||
struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
|
||||
|
||||
if (mode & FALLOC_FL_ZERO_RANGE)
|
||||
lim.max_write_zeroes_sectors = 0;
|
||||
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
lim.max_hw_discard_sectors = 0;
|
||||
lim.discard_granularity = 0;
|
||||
}
|
||||
|
||||
queue_limits_commit_update(lo->lo_queue, &lim);
|
||||
}
|
||||
|
||||
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
|
||||
int mode)
|
||||
{
|
||||
@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
|
||||
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
|
||||
if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* We initially configure the limits in a hope that fallocate is
|
||||
* supported and clear them here if that turns out not to be true.
|
||||
*/
|
||||
if (unlikely(ret == -EOPNOTSUPP))
|
||||
loop_clear_limits(lo, mode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -589,10 +589,11 @@ static inline int was_interrupted(int result)
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
|
||||
* -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
|
||||
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
|
||||
* Returns BLK_STS_IOERR if sending failed.
|
||||
*/
|
||||
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
|
||||
static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
|
||||
int index)
|
||||
{
|
||||
struct request *req = blk_mq_rq_from_pdu(cmd);
|
||||
struct nbd_config *config = nbd->config;
|
||||
@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
|
||||
|
||||
type = req_to_nbd_cmd_type(req);
|
||||
if (type == U32_MAX)
|
||||
return -EIO;
|
||||
return BLK_STS_IOERR;
|
||||
|
||||
if (rq_data_dir(req) == WRITE &&
|
||||
(config->flags & NBD_FLAG_READ_ONLY)) {
|
||||
dev_err_ratelimited(disk_to_dev(nbd->disk),
|
||||
"Write on read-only\n");
|
||||
return -EIO;
|
||||
return BLK_STS_IOERR;
|
||||
}
|
||||
|
||||
if (req->cmd_flags & REQ_FUA)
|
||||
@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
|
||||
nsock->sent = sent;
|
||||
}
|
||||
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
|
||||
return (__force int)BLK_STS_RESOURCE;
|
||||
return BLK_STS_RESOURCE;
|
||||
}
|
||||
dev_err_ratelimited(disk_to_dev(nbd->disk),
|
||||
"Send control failed (result %d)\n", result);
|
||||
return -EAGAIN;
|
||||
goto requeue;
|
||||
}
|
||||
send_pages:
|
||||
if (type != NBD_CMD_WRITE)
|
||||
@ -715,12 +716,12 @@ send_pages:
|
||||
nsock->pending = req;
|
||||
nsock->sent = sent;
|
||||
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
|
||||
return (__force int)BLK_STS_RESOURCE;
|
||||
return BLK_STS_RESOURCE;
|
||||
}
|
||||
dev_err(disk_to_dev(nbd->disk),
|
||||
"Send data failed (result %d)\n",
|
||||
result);
|
||||
return -EAGAIN;
|
||||
goto requeue;
|
||||
}
|
||||
/*
|
||||
* The completion might already have come in,
|
||||
@ -737,7 +738,16 @@ out:
|
||||
trace_nbd_payload_sent(req, handle);
|
||||
nsock->pending = NULL;
|
||||
nsock->sent = 0;
|
||||
return 0;
|
||||
__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
|
||||
return BLK_STS_OK;
|
||||
|
||||
requeue:
|
||||
/* retry on a different socket */
|
||||
dev_err_ratelimited(disk_to_dev(nbd->disk),
|
||||
"Request send failed, requeueing\n");
|
||||
nbd_mark_nsock_dead(nbd, nsock, 1);
|
||||
nbd_requeue_cmd(cmd);
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
|
||||
@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
|
||||
struct nbd_device *nbd = cmd->nbd;
|
||||
struct nbd_config *config;
|
||||
struct nbd_sock *nsock;
|
||||
int ret;
|
||||
blk_status_t ret;
|
||||
|
||||
lockdep_assert_held(&cmd->lock);
|
||||
|
||||
@ -1072,28 +1082,11 @@ again:
|
||||
ret = BLK_STS_OK;
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* Some failures are related to the link going down, so anything that
|
||||
* returns EAGAIN can be retried on a different socket.
|
||||
*/
|
||||
ret = nbd_send_cmd(nbd, cmd, index);
|
||||
/*
|
||||
* Access to this flag is protected by cmd->lock, thus it's safe to set
|
||||
* the flag after nbd_send_cmd() succeed to send request to server.
|
||||
*/
|
||||
if (!ret)
|
||||
__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
|
||||
else if (ret == -EAGAIN) {
|
||||
dev_err_ratelimited(disk_to_dev(nbd->disk),
|
||||
"Request send failed, requeueing\n");
|
||||
nbd_mark_nsock_dead(nbd, nsock, 1);
|
||||
nbd_requeue_cmd(cmd);
|
||||
ret = BLK_STS_OK;
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&nsock->tx_lock);
|
||||
nbd_config_put(nbd);
|
||||
return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
|
@ -1302,12 +1302,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
|
||||
|
||||
no_turbo = !!clamp_t(int, input, 0, 1);
|
||||
|
||||
if (no_turbo == global.no_turbo)
|
||||
goto unlock_driver;
|
||||
|
||||
if (global.turbo_disabled) {
|
||||
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
|
||||
WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
|
||||
if (global.turbo_disabled && !no_turbo) {
|
||||
pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
|
||||
count = -EPERM;
|
||||
if (global.no_turbo)
|
||||
goto unlock_driver;
|
||||
else
|
||||
no_turbo = 1;
|
||||
}
|
||||
|
||||
if (no_turbo == global.no_turbo) {
|
||||
goto unlock_driver;
|
||||
}
|
||||
|
||||
@ -1762,7 +1767,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
|
||||
u32 vid;
|
||||
|
||||
val = (u64)pstate << 8;
|
||||
if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
|
||||
if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
vid_fp = cpudata->vid.min + mul_fp(
|
||||
@ -1927,7 +1932,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
|
||||
u64 val;
|
||||
|
||||
val = (u64)pstate << 8;
|
||||
if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
|
||||
if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
return val;
|
||||
|
@ -11,7 +11,7 @@ config FIREWIRE
|
||||
This is the new-generation IEEE 1394 (FireWire) driver stack
|
||||
a.k.a. Juju, a new implementation designed for robustness and
|
||||
simplicity.
|
||||
See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
|
||||
See http://ieee1394.docs.kernel.org/en/latest/migration.html
|
||||
for information about migration from the older Linux 1394 stack
|
||||
to the new driver stack.
|
||||
|
||||
|
@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset)
|
||||
int reg = short_reset ? 5 : 1;
|
||||
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
|
||||
|
||||
trace_bus_reset_initiate(card->generation, short_reset);
|
||||
trace_bus_reset_initiate(card->index, card->generation, short_reset);
|
||||
|
||||
return card->driver->update_phy_reg(card, reg, 0, bit);
|
||||
}
|
||||
|
||||
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
|
||||
{
|
||||
trace_bus_reset_schedule(card->generation, short_reset);
|
||||
trace_bus_reset_schedule(card->index, card->generation, short_reset);
|
||||
|
||||
/* We don't try hard to sort out requests of long vs. short resets. */
|
||||
card->br_short = short_reset;
|
||||
@ -249,7 +249,7 @@ static void br_work(struct work_struct *work)
|
||||
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
|
||||
if (card->reset_jiffies != 0 &&
|
||||
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
|
||||
trace_bus_reset_postpone(card->generation, card->br_short);
|
||||
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
|
||||
|
||||
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
|
||||
fw_card_put(card);
|
||||
|
@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
|
||||
struct client *e_client = e->client;
|
||||
u32 rcode;
|
||||
|
||||
trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
|
||||
trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation,
|
||||
packet->timestamp);
|
||||
|
||||
switch (status) {
|
||||
@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
|
||||
memcpy(pp->data, a->data, sizeof(a->data));
|
||||
}
|
||||
|
||||
trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
|
||||
e->p.header[2]);
|
||||
trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation,
|
||||
e->p.header[1], e->p.header[2]);
|
||||
|
||||
card->driver->send_request(card, &e->p);
|
||||
|
||||
|
@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
|
||||
struct fw_node *local_node;
|
||||
unsigned long flags;
|
||||
|
||||
trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count);
|
||||
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
|
||||
|
||||
spin_lock_irqsave(&card->lock, flags);
|
||||
|
||||
|
@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet,
|
||||
struct fw_transaction *t =
|
||||
container_of(packet, struct fw_transaction, packet);
|
||||
|
||||
trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed,
|
||||
status, packet->timestamp);
|
||||
trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
|
||||
packet->speed, status, packet->timestamp);
|
||||
|
||||
switch (status) {
|
||||
case ACK_COMPLETE:
|
||||
@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
|
||||
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
|
||||
trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload,
|
||||
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
|
||||
t->packet.header, payload,
|
||||
tcode_is_read_request(tcode) ? 0 : length / 4);
|
||||
|
||||
card->driver->send_request(card, &t->packet);
|
||||
@ -463,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done);
|
||||
static void transmit_phy_packet_callback(struct fw_packet *packet,
|
||||
struct fw_card *card, int status)
|
||||
{
|
||||
trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status,
|
||||
trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
|
||||
packet->timestamp);
|
||||
complete(&phy_config_done);
|
||||
}
|
||||
@ -503,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card,
|
||||
phy_config_packet.generation = generation;
|
||||
reinit_completion(&phy_config_done);
|
||||
|
||||
trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet,
|
||||
trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
|
||||
phy_config_packet.generation, phy_config_packet.header[1],
|
||||
phy_config_packet.header[2]);
|
||||
|
||||
@ -674,7 +675,7 @@ static void free_response_callback(struct fw_packet *packet,
|
||||
{
|
||||
struct fw_request *request = container_of(packet, struct fw_request, response);
|
||||
|
||||
trace_async_response_outbound_complete((uintptr_t)request, packet->generation,
|
||||
trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
|
||||
packet->speed, status, packet->timestamp);
|
||||
|
||||
// Decrease the reference count since not at in-flight.
|
||||
@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card,
|
||||
// Increase the reference count so that the object is kept during in-flight.
|
||||
fw_request_get(request);
|
||||
|
||||
trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation,
|
||||
request->response.speed, request->response.header,
|
||||
data, data ? data_length / 4 : 0);
|
||||
trace_async_response_outbound_initiate((uintptr_t)request, card->index,
|
||||
request->response.generation, request->response.speed,
|
||||
request->response.header, data,
|
||||
data ? data_length / 4 : 0);
|
||||
|
||||
card->driver->send_response(card, &request->response);
|
||||
}
|
||||
@ -995,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
|
||||
|
||||
tcode = async_header_get_tcode(p->header);
|
||||
if (tcode_is_link_internal(tcode)) {
|
||||
trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp,
|
||||
trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
|
||||
p->header[1], p->header[2]);
|
||||
fw_cdev_handle_phy_packet(card, p);
|
||||
return;
|
||||
@ -1007,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
|
||||
return;
|
||||
}
|
||||
|
||||
trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack,
|
||||
p->timestamp, p->header, request->data,
|
||||
trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
|
||||
p->ack, p->timestamp, p->header, request->data,
|
||||
tcode_is_read_request(tcode) ? 0 : request->length / 4);
|
||||
|
||||
offset = async_header_get_offset(p->header);
|
||||
@ -1078,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
|
||||
}
|
||||
spin_unlock_irqrestore(&card->lock, flags);
|
||||
|
||||
trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp,
|
||||
p->header, data, data_length / 4);
|
||||
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
|
||||
p->timestamp, p->header, data, data_length / 4);
|
||||
|
||||
if (!t) {
|
||||
timed_out:
|
||||
|
@ -15,10 +15,6 @@
|
||||
#include <asm/early_ioremap.h>
|
||||
#include <asm/efi.h>
|
||||
|
||||
#ifndef __efi_memmap_free
|
||||
#define __efi_memmap_free(phys, size, flags) do { } while (0)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* __efi_memmap_init - Common code for mapping the EFI memory map
|
||||
* @data: EFI memory map data
|
||||
@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
|
||||
__efi_memmap_free(efi.memmap.phys_map,
|
||||
efi.memmap.desc_size * efi.memmap.nr_map,
|
||||
efi.memmap.flags);
|
||||
|
||||
map.phys_map = data->phys_map;
|
||||
map.nr_map = data->size / data->desc_size;
|
||||
map.map_end = map.map + data->size;
|
||||
|
@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN
|
||||
config DRM_WERROR
|
||||
bool "Compile the drm subsystem with warnings as errors"
|
||||
depends on DRM && EXPERT
|
||||
depends on !WERROR
|
||||
default n
|
||||
help
|
||||
A kernel build should not cause any compiler warnings, and this
|
||||
|
@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
|
||||
|
||||
memset(&bp, 0, sizeof(bp));
|
||||
*obj = NULL;
|
||||
flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
|
||||
|
||||
bp.size = size;
|
||||
bp.byte_align = alignment;
|
||||
|
@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
|
||||
if (!amdgpu_bo_support_uswc(bo->flags))
|
||||
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
|
||||
|
||||
bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
|
||||
|
||||
bo->tbo.bdev = &adev->mman.bdev;
|
||||
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
|
||||
AMDGPU_GEM_DOMAIN_GDS))
|
||||
|
@ -12,10 +12,8 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_print.h>
|
||||
|
||||
@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x)
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(komeda_register);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
static void komeda_debugfs_init(struct komeda_dev *mdev)
|
||||
{
|
||||
if (!debugfs_initialized())
|
||||
@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
|
||||
debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root,
|
||||
&mdev->err_verbosity);
|
||||
}
|
||||
#endif
|
||||
|
||||
static ssize_t
|
||||
core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
|
||||
|
||||
mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
komeda_debugfs_init(mdev);
|
||||
#endif
|
||||
|
||||
return mdev;
|
||||
|
||||
@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
|
||||
|
||||
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
debugfs_remove_recursive(mdev->debugfs_root);
|
||||
#endif
|
||||
|
||||
if (mdev->aclk)
|
||||
clk_prepare_enable(mdev->aclk);
|
||||
|
@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
|
||||
u32 avail_scalers;
|
||||
|
||||
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
|
||||
if (!pipe_st)
|
||||
if (IS_ERR_OR_NULL(pipe_st))
|
||||
return NULL;
|
||||
|
||||
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
|
||||
|
@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
|
||||
|
||||
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
|
||||
{
|
||||
struct drm_bridge **bridge = res;
|
||||
struct drm_bridge *bridge = *(struct drm_bridge **)res;
|
||||
|
||||
drm_panel_bridge_remove(*bridge);
|
||||
if (!bridge)
|
||||
return;
|
||||
|
||||
drm_bridge_remove(bridge);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
|
||||
},
|
||||
.driver_data = (void *)&lcd800x1280_rightside_up,
|
||||
}, { /* AYA NEO KUN */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "KUN"),
|
||||
},
|
||||
.driver_data = (void *)&lcd1600x2560_rightside_up,
|
||||
}, { /* Chuwi HiBook (CWI514) */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
|
||||
|
@ -285,7 +285,6 @@ struct platform_driver dp_driver = {
|
||||
.remove_new = exynos_dp_remove,
|
||||
.driver = {
|
||||
.name = "exynos-dp",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = pm_ptr(&exynos_dp_pm_ops),
|
||||
.of_match_table = exynos_dp_match,
|
||||
},
|
||||
|
@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
|
||||
struct vidi_context *ctx = ctx_from_connector(connector);
|
||||
struct edid *edid;
|
||||
int edid_len;
|
||||
int count;
|
||||
|
||||
/*
|
||||
* the edid data comes from user side and it would be set
|
||||
@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
|
||||
|
||||
drm_connector_update_edid_property(connector, edid);
|
||||
|
||||
return drm_add_edid_modes(connector, edid);
|
||||
count = drm_add_edid_modes(connector, edid);
|
||||
|
||||
kfree(edid);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
|
||||
|
@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
|
||||
int ret;
|
||||
|
||||
if (!hdata->ddc_adpt)
|
||||
return 0;
|
||||
goto no_edid;
|
||||
|
||||
edid = drm_get_edid(connector, hdata->ddc_adpt);
|
||||
if (!edid)
|
||||
return 0;
|
||||
goto no_edid;
|
||||
|
||||
hdata->dvi_mode = !connector->display_info.is_hdmi;
|
||||
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
|
||||
@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
|
||||
kfree(edid);
|
||||
|
||||
return ret;
|
||||
|
||||
no_edid:
|
||||
return drm_add_modes_noedid(connector, 640, 480);
|
||||
}
|
||||
|
||||
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
|
||||
|
@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
|
||||
of_node_put(private->comp_node[i]);
|
||||
}
|
||||
|
||||
static void mtk_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct mtk_drm_private *private = platform_get_drvdata(pdev);
|
||||
|
||||
drm_atomic_helper_shutdown(private->drm);
|
||||
}
|
||||
|
||||
static int mtk_drm_sys_prepare(struct device *dev)
|
||||
{
|
||||
struct mtk_drm_private *private = dev_get_drvdata(dev);
|
||||
@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
|
||||
static struct platform_driver mtk_drm_platform_driver = {
|
||||
.probe = mtk_drm_probe,
|
||||
.remove_new = mtk_drm_remove,
|
||||
.shutdown = mtk_drm_shutdown,
|
||||
.driver = {
|
||||
.name = "mediatek-drm",
|
||||
.pm = &mtk_drm_pm_ops,
|
||||
|
@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
	if (nv_two_heads(dev))
		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);

	if (!runtime)
	if (!runtime && !drm->headless)
		cancel_work_sync(&drm->hpd_work);

	if (!suspend)
@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
			nv50_mstm_fini(nouveau_encoder(encoder));
	}

	if (!runtime)
	if (!runtime && !drm->headless)
		cancel_work_sync(&drm->hpd_work);
}

@ -43,11 +43,6 @@
#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
#define LOG_OLD_VALUE(x)

struct init_exec {
	bool execute;
	bool repeat;
};

static bool nv_cksum(const uint8_t *data, unsigned int length)
{
	/*
@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (drm->headless)
		return;

	spin_lock_irq(&drm->hpd_lock);
	drm->hpd_pending = ~0;
	spin_unlock_irq(&drm->hpd_lock);
@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!runtime)
	if (!runtime && !drm->headless)
		cancel_work_sync(&drm->hpd_work);

	drm_kms_helper_poll_disable(dev);
@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
		/* no display hw */
		if (ret == -ENODEV) {
			ret = 0;
			drm->headless = true;
			goto disp_create_err;
		}

@ -276,6 +276,7 @@ struct nouveau_drm {
	/* modesetting */
	struct nvbios vbios;
	struct nouveau_display *display;
	bool headless;
	struct work_struct hpd_work;
	spinlock_t hpd_lock;
	u32 hpd_pending;
@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev)
	drm_kms_helper_poll_fini(ddev);
}

static void shmob_drm_shutdown(struct platform_device *pdev)
{
	struct shmob_drm_device *sdev = platform_get_drvdata(pdev);

	drm_atomic_helper_shutdown(&sdev->ddev);
}

static int shmob_drm_probe(struct platform_device *pdev)
{
	struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
static struct platform_driver shmob_drm_platform_driver = {
	.probe = shmob_drm_probe,
	.remove_new = shmob_drm_remove,
	.shutdown = shmob_drm_shutdown,
	.driver = {
		.name = "shmob-drm",
		.of_match_table = of_match_ptr(shmob_drm_of_table),
@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = {
static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
{
	struct kobject *kobj = arg;
	struct xe_gt *gt = kobj_to_gt(kobj->parent);

	if (gt_to_xe(gt)->info.skip_guc_pc) {
		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
		xe_gt_idle_disable_c6(gt);
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}

	sysfs_remove_files(kobj, gt_idle_attrs);
	kobject_put(kobj);
@ -199,7 +206,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
	xe_device_assert_mem_access(gt_to_xe(gt));
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, PG_ENABLE, 0);
	xe_mmio_write32(gt, RC_CONTROL, 0);
@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
	unsigned int tid;
	int err;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
@ -1337,6 +1343,7 @@ fail:

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));
@ -1745,11 +1753,14 @@ static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		pf_release_vf_config_lmem(gt, config);
		pf_update_vf_lmtt(gt_to_xe(gt), vfid);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return;
	}

	XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
	return i;
}

static int emit_flush_dw(u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
	dw[i++] = 0;
	dw[i++] = 0;
	dw[i++] = 0;

	return i;
}

static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
			       u32 *dw, int i)
{
@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);

@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
@ -45,8 +45,8 @@ int hv_init(void)
|
||||
* This involves a hypercall.
|
||||
*/
|
||||
int hv_post_message(union hv_connection_id connection_id,
|
||||
enum hv_message_type message_type,
|
||||
void *payload, size_t payload_size)
|
||||
enum hv_message_type message_type,
|
||||
void *payload, size_t payload_size)
|
||||
{
|
||||
struct hv_input_post_message *aligned_msg;
|
||||
unsigned long flags;
|
||||
@ -86,7 +86,7 @@ int hv_post_message(union hv_connection_id connection_id,
|
||||
status = HV_STATUS_INVALID_PARAMETER;
|
||||
} else {
|
||||
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
|
||||
aligned_msg, NULL);
|
||||
aligned_msg, NULL);
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
@ -111,7 +111,7 @@ int hv_synic_alloc(void)
|
||||
|
||||
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
|
||||
GFP_KERNEL);
|
||||
if (hv_context.hv_numa_map == NULL) {
|
||||
if (!hv_context.hv_numa_map) {
|
||||
pr_err("Unable to allocate NUMA map\n");
|
||||
goto err;
|
||||
}
|
||||
@ -120,11 +120,11 @@ int hv_synic_alloc(void)
|
||||
hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
|
||||
tasklet_init(&hv_cpu->msg_dpc,
|
||||
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
|
||||
vmbus_on_msg_dpc, (unsigned long)hv_cpu);
|
||||
|
||||
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
|
||||
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->post_msg_page == NULL) {
|
||||
if (!hv_cpu->post_msg_page) {
|
||||
pr_err("Unable to allocate post msg page\n");
|
||||
goto err;
|
||||
}
|
||||
@ -147,14 +147,14 @@ int hv_synic_alloc(void)
|
||||
if (!ms_hyperv.paravisor_present && !hv_root_partition) {
|
||||
hv_cpu->synic_message_page =
|
||||
(void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->synic_message_page == NULL) {
|
||||
if (!hv_cpu->synic_message_page) {
|
||||
pr_err("Unable to allocate SYNIC message page\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
hv_cpu->synic_event_page =
|
||||
(void *)get_zeroed_page(GFP_ATOMIC);
|
||||
if (hv_cpu->synic_event_page == NULL) {
|
||||
if (!hv_cpu->synic_event_page) {
|
||||
pr_err("Unable to allocate SYNIC event page\n");
|
||||
|
||||
free_page((unsigned long)hv_cpu->synic_message_page);
|
||||
@ -203,14 +203,13 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
void hv_synic_free(void)
|
||||
{
|
||||
int cpu, ret;
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
|
||||
/* It's better to leak the page if the encryption fails. */
|
||||
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
|
||||
@ -262,8 +261,8 @@ void hv_synic_free(void)
|
||||
*/
|
||||
void hv_synic_enable_regs(unsigned int cpu)
|
||||
{
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
union hv_synic_simp simp;
|
||||
union hv_synic_siefp siefp;
|
||||
union hv_synic_sint shared_sint;
|
||||
@ -277,8 +276,8 @@ void hv_synic_enable_regs(unsigned int cpu)
|
||||
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
|
||||
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
|
||||
~ms_hyperv.shared_gpa_boundary;
|
||||
hv_cpu->synic_message_page
|
||||
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
hv_cpu->synic_message_page =
|
||||
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
if (!hv_cpu->synic_message_page)
|
||||
pr_err("Fail to map synic message page.\n");
|
||||
} else {
|
||||
@ -296,8 +295,8 @@ void hv_synic_enable_regs(unsigned int cpu)
|
||||
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
|
||||
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
|
||||
~ms_hyperv.shared_gpa_boundary;
|
||||
hv_cpu->synic_event_page
|
||||
= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
hv_cpu->synic_event_page =
|
||||
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
|
||||
if (!hv_cpu->synic_event_page)
|
||||
pr_err("Fail to map synic event page.\n");
|
||||
} else {
|
||||
@ -348,8 +347,8 @@ int hv_synic_init(unsigned int cpu)
|
||||
*/
|
||||
void hv_synic_disable_regs(unsigned int cpu)
|
||||
{
|
||||
struct hv_per_cpu_context *hv_cpu
|
||||
= per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
struct hv_per_cpu_context *hv_cpu =
|
||||
per_cpu_ptr(hv_context.cpu_context, cpu);
|
||||
union hv_synic_sint shared_sint;
|
||||
union hv_synic_simp simp;
|
||||
union hv_synic_siefp siefp;
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/percpu_counter.h>
|
||||
#include <linux/page_reporting.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#include <linux/hyperv.h>
|
||||
#include <asm/hyperv-tlfs.h>
|
||||
@ -41,8 +42,6 @@
|
||||
* Begin protocol definitions.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Protocol versions. The low word is the minor version, the high word the major
|
||||
* version.
|
||||
@ -71,8 +70,6 @@ enum {
|
||||
DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
|
||||
};
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* Message Types
|
||||
*/
|
||||
@ -101,7 +98,6 @@ enum dm_message_type {
|
||||
DM_VERSION_1_MAX = 12
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Structures defining the dynamic memory management
|
||||
* protocol.
|
||||
@ -115,7 +111,6 @@ union dm_version {
|
||||
__u32 version;
|
||||
} __packed;
|
||||
|
||||
|
||||
union dm_caps {
|
||||
struct {
|
||||
__u64 balloon:1;
|
||||
@ -148,8 +143,6 @@ union dm_mem_page_range {
|
||||
__u64 page_range;
|
||||
} __packed;
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The header for all dynamic memory messages:
|
||||
*
|
||||
@ -174,7 +167,6 @@ struct dm_message {
|
||||
__u8 data[]; /* enclosed message */
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Specific message types supporting the dynamic memory protocol.
|
||||
*/
|
||||
@ -271,7 +263,6 @@ struct dm_status {
|
||||
__u32 io_diff;
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Message to ask the guest to allocate memory - balloon up message.
|
||||
* This message is sent from the host to the guest. The guest may not be
|
||||
@ -286,14 +277,13 @@ struct dm_balloon {
|
||||
__u32 reservedz;
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Balloon response message; this message is sent from the guest
|
||||
* to the host in response to the balloon message.
|
||||
*
|
||||
* reservedz: Reserved; must be set to zero.
|
||||
* more_pages: If FALSE, this is the last message of the transaction.
|
||||
* if TRUE there will atleast one more message from the guest.
|
||||
* if TRUE there will be at least one more message from the guest.
|
||||
*
|
||||
* range_count: The number of ranges in the range array.
|
||||
*
|
||||
@ -314,7 +304,7 @@ struct dm_balloon_response {
|
||||
* to the guest to give guest more memory.
|
||||
*
|
||||
* more_pages: If FALSE, this is the last message of the transaction.
|
||||
* if TRUE there will atleast one more message from the guest.
|
||||
* if TRUE there will be at least one more message from the guest.
|
||||
*
|
||||
* reservedz: Reserved; must be set to zero.
|
||||
*
|
||||
@ -342,7 +332,6 @@ struct dm_unballoon_response {
|
||||
struct dm_header hdr;
|
||||
} __packed;
|
||||
|
||||
|
||||
/*
|
||||
* Hot add request message. Message sent from the host to the guest.
|
||||
*
|
||||
@ -390,7 +379,6 @@ enum dm_info_type {
|
||||
MAX_INFO_TYPE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Header for the information message.
|
||||
*/
|
||||
@ -425,11 +413,11 @@ struct dm_info_msg {
|
||||
* The range start_pfn : end_pfn specifies the range
|
||||
* that the host has asked us to hot add. The range
|
||||
* start_pfn : ha_end_pfn specifies the range that we have
|
||||
* currently hot added. We hot add in multiples of 128M
|
||||
* chunks; it is possible that we may not be able to bring
|
||||
* online all the pages in the region. The range
|
||||
* currently hot added. We hot add in chunks equal to the
|
||||
* memory block size; it is possible that we may not be able
|
||||
* to bring online all the pages in the region. The range
|
||||
* covered_start_pfn:covered_end_pfn defines the pages that can
|
||||
* be brough online.
|
||||
* be brought online.
|
||||
*/
|
||||
|
||||
struct hv_hotadd_state {
|
||||
@ -480,10 +468,10 @@ static unsigned long last_post_time;
|
||||
|
||||
static int hv_hypercall_multi_failure;
|
||||
|
||||
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
|
||||
module_param(hot_add, bool, 0644);
|
||||
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
|
||||
|
||||
module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
|
||||
module_param(pressure_report_delay, uint, 0644);
|
||||
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
|
||||
static atomic_t trans_id = ATOMIC_INIT(0);
|
||||
|
||||
@ -502,11 +490,13 @@ enum hv_dm_state {
|
||||
DM_INIT_ERROR
|
||||
};
|
||||
|
||||
|
||||
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
|
||||
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
|
||||
|
||||
static unsigned long ha_pages_in_chunk;
|
||||
#define HA_BYTES_IN_CHUNK (ha_pages_in_chunk << PAGE_SHIFT)
|
||||
|
||||
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
|
||||
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
|
||||
|
||||
struct hv_dynmem_device {
|
||||
struct hv_device *dev;
|
||||
@ -595,12 +585,12 @@ static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
|
||||
struct hv_hotadd_gap *gap;
|
||||
|
||||
/* The page is not backed. */
|
||||
if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
|
||||
if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
|
||||
return false;
|
||||
|
||||
/* Check for gaps. */
|
||||
list_for_each_entry(gap, &has->gap_list, list) {
|
||||
if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
|
||||
if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -724,28 +714,21 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
|
||||
unsigned long processed_pfn;
|
||||
unsigned long total_pfn = pfn_count;
|
||||
|
||||
for (i = 0; i < (size/HA_CHUNK); i++) {
|
||||
start_pfn = start + (i * HA_CHUNK);
|
||||
for (i = 0; i < (size/ha_pages_in_chunk); i++) {
|
||||
start_pfn = start + (i * ha_pages_in_chunk);
|
||||
|
||||
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
|
||||
has->ha_end_pfn += HA_CHUNK;
|
||||
|
||||
if (total_pfn > HA_CHUNK) {
|
||||
processed_pfn = HA_CHUNK;
|
||||
total_pfn -= HA_CHUNK;
|
||||
} else {
|
||||
processed_pfn = total_pfn;
|
||||
total_pfn = 0;
|
||||
}
|
||||
|
||||
has->covered_end_pfn += processed_pfn;
|
||||
has->ha_end_pfn += ha_pages_in_chunk;
|
||||
processed_pfn = umin(total_pfn, ha_pages_in_chunk);
|
||||
total_pfn -= processed_pfn;
|
||||
has->covered_end_pfn += processed_pfn;
|
||||
}
|
||||
|
||||
reinit_completion(&dm_device.ol_waitevent);
|
||||
|
||||
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
|
||||
ret = add_memory(nid, PFN_PHYS((start_pfn)),
|
||||
(HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE);
|
||||
HA_BYTES_IN_CHUNK, MHP_MERGE_RESOURCE);
|
||||
|
||||
if (ret) {
|
||||
pr_err("hot_add memory failed error is %d\n", ret);
|
||||
@ -760,7 +743,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
|
||||
do_hot_add = false;
|
||||
}
|
||||
scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
|
||||
has->ha_end_pfn -= HA_CHUNK;
|
||||
has->ha_end_pfn -= ha_pages_in_chunk;
|
||||
has->covered_end_pfn -= processed_pfn;
|
||||
}
|
||||
break;
|
||||
@ -787,8 +770,8 @@ static void hv_online_page(struct page *pg, unsigned int order)
|
||||
guard(spinlock_irqsave)(&dm_device.ha_lock);
|
||||
list_for_each_entry(has, &dm_device.ha_region_list, list) {
|
||||
/* The page belongs to a different HAS. */
|
||||
if ((pfn < has->start_pfn) ||
|
||||
(pfn + (1UL << order) > has->end_pfn))
|
||||
if (pfn < has->start_pfn ||
|
||||
(pfn + (1UL << order) > has->end_pfn))
|
||||
continue;
|
||||
|
||||
hv_bring_pgs_online(has, pfn, 1UL << order);
|
||||
@ -800,7 +783,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
|
||||
{
|
||||
struct hv_hotadd_state *has;
|
||||
struct hv_hotadd_gap *gap;
|
||||
unsigned long residual, new_inc;
|
||||
unsigned long residual;
|
||||
int ret = 0;
|
||||
|
||||
guard(spinlock_irqsave)(&dm_device.ha_lock);
|
||||
@ -836,15 +819,9 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
|
||||
* our current limit; extend it.
|
||||
*/
|
||||
if ((start_pfn + pfn_cnt) > has->end_pfn) {
|
||||
/* Extend the region by multiples of ha_pages_in_chunk */
|
||||
residual = (start_pfn + pfn_cnt - has->end_pfn);
|
||||
/*
|
||||
* Extend the region by multiples of HA_CHUNK.
|
||||
*/
|
||||
new_inc = (residual / HA_CHUNK) * HA_CHUNK;
|
||||
if (residual % HA_CHUNK)
|
||||
new_inc += HA_CHUNK;
|
||||
|
||||
has->end_pfn += new_inc;
|
||||
has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
|
||||
}
|
||||
|
||||
ret = 1;
|
||||
@ -855,7 +832,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
|
||||
}
|
||||
|
||||
static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
unsigned long pg_count)
|
||||
unsigned long pg_count)
|
||||
{
|
||||
unsigned long start_pfn = pg_start;
|
||||
unsigned long pfn_cnt = pg_count;
|
||||
@ -866,7 +843,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
unsigned long res = 0, flags;
|
||||
|
||||
pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
|
||||
pg_start);
|
||||
pg_start);
|
||||
|
||||
spin_lock_irqsave(&dm_device.ha_lock, flags);
|
||||
list_for_each_entry(has, &dm_device.ha_region_list, list) {
|
||||
@ -902,22 +879,19 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
if (start_pfn > has->start_pfn &&
|
||||
online_section_nr(pfn_to_section_nr(start_pfn)))
|
||||
hv_bring_pgs_online(has, start_pfn, pgs_ol);
|
||||
|
||||
}
|
||||
|
||||
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
|
||||
if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
|
||||
/*
|
||||
* We have some residual hot add range
|
||||
* that needs to be hot added; hot add
|
||||
* it now. Hot add a multiple of
|
||||
* HA_CHUNK that fully covers the pages
|
||||
* ha_pages_in_chunk that fully covers the pages
|
||||
* we have.
|
||||
*/
|
||||
size = (has->end_pfn - has->ha_end_pfn);
|
||||
if (pfn_cnt <= size) {
|
||||
size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
|
||||
if (pfn_cnt % HA_CHUNK)
|
||||
size += HA_CHUNK;
|
||||
size = ALIGN(pfn_cnt, ha_pages_in_chunk);
|
||||
} else {
|
||||
pfn_cnt = size;
|
||||
}
|
||||
@ -1010,10 +984,7 @@ static void hot_add_req(struct work_struct *dummy)
|
||||
rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
|
||||
rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
|
||||
|
||||
if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
|
||||
unsigned long region_size;
|
||||
unsigned long region_start;
|
||||
|
||||
if (rg_start == 0 && !dm->host_specified_ha_region) {
|
||||
/*
|
||||
* The host has not specified the hot-add region.
|
||||
* Based on the hot-add page range being specified,
|
||||
@ -1021,19 +992,13 @@ static void hot_add_req(struct work_struct *dummy)
|
||||
* that need to be hot-added while ensuring the alignment
|
||||
* and size requirements of Linux as it relates to hot-add.
|
||||
*/
|
||||
region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
|
||||
if (pfn_cnt % HA_CHUNK)
|
||||
region_size += HA_CHUNK;
|
||||
|
||||
region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
|
||||
|
||||
rg_start = region_start;
|
||||
rg_sz = region_size;
|
||||
rg_start = ALIGN_DOWN(pg_start, ha_pages_in_chunk);
|
||||
rg_sz = ALIGN(pfn_cnt, ha_pages_in_chunk);
|
||||
}
|
||||
|
||||
if (do_hot_add)
|
||||
resp.page_count = process_hot_add(pg_start, pfn_cnt,
|
||||
rg_start, rg_sz);
|
||||
rg_start, rg_sz);
|
||||
|
||||
dm->num_pages_added += resp.page_count;
|
||||
#endif
|
||||
@ -1211,11 +1176,10 @@ static void post_status(struct hv_dynmem_device *dm)
|
||||
sizeof(struct dm_status),
|
||||
(unsigned long)NULL,
|
||||
VM_PKT_DATA_INBAND, 0);
|
||||
|
||||
}
|
||||
|
||||
static void free_balloon_pages(struct hv_dynmem_device *dm,
|
||||
union dm_mem_page_range *range_array)
|
||||
union dm_mem_page_range *range_array)
|
||||
{
|
||||
int num_pages = range_array->finfo.page_cnt;
|
||||
__u64 start_frame = range_array->finfo.start_page;
|
||||
@ -1231,8 +1195,6 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
|
||||
unsigned int num_pages,
|
||||
struct dm_balloon_response *bl_resp,
|
||||
@ -1278,7 +1240,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
|
||||
page_to_pfn(pg);
|
||||
bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
|
||||
bl_resp->hdr.size += sizeof(union dm_mem_page_range);
|
||||
|
||||
}
|
||||
|
||||
return i * alloc_unit;
|
||||
@ -1332,7 +1293,7 @@ static void balloon_up(struct work_struct *dummy)
|
||||
|
||||
if (num_ballooned == 0 || num_ballooned == num_pages) {
|
||||
pr_debug("Ballooned %u out of %u requested pages.\n",
|
||||
num_pages, dm_device.balloon_wrk.num_pages);
|
||||
num_pages, dm_device.balloon_wrk.num_pages);
|
||||
|
||||
bl_resp->more_pages = 0;
|
||||
done = true;
|
||||
@ -1366,16 +1327,15 @@ static void balloon_up(struct work_struct *dummy)
|
||||
|
||||
for (i = 0; i < bl_resp->range_count; i++)
|
||||
free_balloon_pages(&dm_device,
|
||||
&bl_resp->range_array[i]);
|
||||
&bl_resp->range_array[i]);
|
||||
|
||||
done = true;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void balloon_down(struct hv_dynmem_device *dm,
|
||||
struct dm_unballoon_request *req)
|
||||
struct dm_unballoon_request *req)
|
||||
{
|
||||
union dm_mem_page_range *range_array = req->range_array;
|
||||
int range_count = req->range_count;
|
||||
@ -1389,7 +1349,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
|
||||
}
|
||||
|
||||
pr_debug("Freed %u ballooned pages.\n",
|
||||
prev_pages_ballooned - dm->num_pages_ballooned);
|
||||
prev_pages_ballooned - dm->num_pages_ballooned);
|
||||
|
||||
if (req->more_pages == 1)
|
||||
return;
|
||||
@ -1414,8 +1374,7 @@ static int dm_thread_func(void *dm_dev)
|
||||
struct hv_dynmem_device *dm = dm_dev;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
wait_for_completion_interruptible_timeout(
|
||||
&dm_device.config_event, 1*HZ);
|
||||
wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
|
||||
/*
|
||||
* The host expects us to post information on the memory
|
||||
* pressure every second.
|
||||
@ -1439,9 +1398,8 @@ static int dm_thread_func(void *dm_dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void version_resp(struct hv_dynmem_device *dm,
|
||||
struct dm_version_response *vresp)
|
||||
struct dm_version_response *vresp)
|
||||
{
|
||||
struct dm_version_request version_req;
|
||||
int ret;
|
||||
@ -1502,7 +1460,7 @@ version_error:
|
||||
}
|
||||
|
||||
static void cap_resp(struct hv_dynmem_device *dm,
|
||||
struct dm_capabilities_resp_msg *cap_resp)
|
||||
struct dm_capabilities_resp_msg *cap_resp)
|
||||
{
|
||||
if (!cap_resp->is_accepted) {
|
||||
pr_err("Capabilities not accepted by host\n");
|
||||
@ -1535,7 +1493,7 @@ static void balloon_onchannelcallback(void *context)
|
||||
switch (dm_hdr->type) {
|
||||
case DM_VERSION_RESPONSE:
|
||||
version_resp(dm,
|
||||
(struct dm_version_response *)dm_msg);
|
||||
(struct dm_version_response *)dm_msg);
|
||||
break;
|
||||
|
||||
case DM_CAPABILITIES_RESPONSE:
|
||||
@ -1565,7 +1523,7 @@ static void balloon_onchannelcallback(void *context)
|
||||
|
||||
dm->state = DM_BALLOON_DOWN;
|
||||
balloon_down(dm,
|
||||
(struct dm_unballoon_request *)recv_buffer);
|
||||
(struct dm_unballoon_request *)recv_buffer);
|
||||
break;
|
||||
|
||||
case DM_MEM_HOT_ADD_REQUEST:
|
||||
@ -1603,17 +1561,15 @@ static void balloon_onchannelcallback(void *context)
|
||||
|
||||
default:
|
||||
pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#define HV_LARGE_REPORTING_ORDER 9
|
||||
#define HV_LARGE_REPORTING_LEN (HV_HYP_PAGE_SIZE << \
|
||||
HV_LARGE_REPORTING_ORDER)
|
||||
static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
struct scatterlist *sgl, unsigned int nents)
|
||||
struct scatterlist *sgl, unsigned int nents)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct hv_memory_hint *hint;
|
||||
@ -1648,7 +1604,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
*/
|
||||
|
||||
/* page reporting for pages 2MB or higher */
|
||||
if (order >= HV_LARGE_REPORTING_ORDER ) {
|
||||
if (order >= HV_LARGE_REPORTING_ORDER) {
|
||||
range->page.largepage = 1;
|
||||
range->page_size = HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB;
|
||||
range->base_large_pfn = page_to_hvpfn(
|
||||
@ -1662,23 +1618,21 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
|
||||
range->page.additional_pages =
|
||||
(sg->length / HV_HYP_PAGE_SIZE) - 1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
status = hv_do_rep_hypercall(HV_EXT_CALL_MEMORY_HEAT_HINT, nents, 0,
|
||||
hint, NULL);
|
||||
local_irq_restore(flags);
|
||||
if (!hv_result_success(status)) {
|
||||
|
||||
pr_err("Cold memory discard hypercall failed with status %llx\n",
|
||||
status);
|
||||
status);
|
||||
if (hv_hypercall_multi_failure > 0)
|
||||
hv_hypercall_multi_failure++;
|
||||
|
||||
if (hv_result(status) == HV_STATUS_INVALID_PARAMETER) {
|
||||
pr_err("Underlying Hyper-V does not support order less than 9. Hypercall failed\n");
|
||||
pr_err("Defaulting to page_reporting_order %d\n",
|
||||
pageblock_order);
|
||||
pageblock_order);
|
||||
page_reporting_order = pageblock_order;
|
||||
hv_hypercall_multi_failure++;
|
||||
return -EINVAL;
|
||||
@ -1712,7 +1666,7 @@ static void enable_page_reporting(void)
|
||||
pr_err("Failed to enable cold memory discard: %d\n", ret);
|
||||
} else {
|
||||
pr_info("Cold memory discard hint enabled with order %d\n",
|
||||
page_reporting_order);
|
||||
page_reporting_order);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1795,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
|
||||
if (t == 0) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
@ -1831,10 +1785,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
|
||||
cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
|
||||
|
||||
/*
|
||||
* Specify our alignment requirements as it relates
|
||||
* memory hot-add. Specify 128MB alignment.
|
||||
* Specify our alignment requirements for memory hot-add. The value is
|
||||
* the log base 2 of the number of megabytes in a chunk. For example,
|
||||
* with 256 MiB chunks, the value is 8. The number of MiB in a chunk
|
||||
* must be a power of 2.
|
||||
*/
|
||||
cap_msg.caps.cap_bits.hot_add_alignment = 7;
|
||||
cap_msg.caps.cap_bits.hot_add_alignment =
|
||||
ilog2(HA_BYTES_IN_CHUNK / SZ_1M);
|
||||
|
||||
/*
|
||||
* Currently the host does not use these
|
||||
@ -1850,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
|
||||
t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
|
||||
if (t == 0) {
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
@ -1891,8 +1848,8 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
|
||||
char *sname;
|
||||
|
||||
seq_printf(f, "%-22s: %u.%u\n", "host_version",
|
||||
DYNMEM_MAJOR_VERSION(dm->version),
|
||||
DYNMEM_MINOR_VERSION(dm->version));
|
||||
DYNMEM_MAJOR_VERSION(dm->version),
|
||||
DYNMEM_MINOR_VERSION(dm->version));
|
||||
|
||||
seq_printf(f, "%-22s:", "capabilities");
|
||||
if (ballooning_enabled())
|
||||
@ -1941,10 +1898,10 @@ static int hv_balloon_debug_show(struct seq_file *f, void *offset)
|
||||
seq_printf(f, "%-22s: %u\n", "pages_ballooned", dm->num_pages_ballooned);
|
||||
|
||||
seq_printf(f, "%-22s: %lu\n", "total_pages_committed",
|
||||
get_pages_committed(dm));
|
||||
get_pages_committed(dm));
|
||||
|
||||
seq_printf(f, "%-22s: %llu\n", "max_dynamic_page_count",
|
||||
dm->max_dynamic_page_count);
|
||||
dm->max_dynamic_page_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1954,7 +1911,7 @@ DEFINE_SHOW_ATTRIBUTE(hv_balloon_debug);
|
||||
static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
|
||||
{
|
||||
debugfs_create_file("hv-balloon", 0444, NULL, b,
|
||||
&hv_balloon_debug_fops);
|
||||
&hv_balloon_debug_fops);
|
||||
}
|
||||
|
||||
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
|
||||
@ -1984,8 +1941,23 @@ static int balloon_probe(struct hv_device *dev,
|
||||
hot_add = false;
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
/*
|
||||
* Hot-add must operate in chunks that are of size equal to the
|
||||
* memory block size because that's what the core add_memory()
|
||||
* interface requires. The Hyper-V interface requires that the memory
|
||||
* block size be a power of 2, which is guaranteed by the check in
|
||||
* memory_dev_init().
|
||||
*/
|
||||
ha_pages_in_chunk = memory_block_size_bytes() / PAGE_SIZE;
|
||||
do_hot_add = hot_add;
|
||||
#else
|
||||
/*
|
||||
* Without MEMORY_HOTPLUG, the guest returns a failure status for all
|
||||
* hot add requests from Hyper-V, and the chunk size is used only to
|
||||
* specify alignment to Hyper-V as required by the host/guest protocol.
|
||||
* Somewhat arbitrarily, use 128 MiB.
|
||||
*/
|
||||
ha_pages_in_chunk = SZ_128M / PAGE_SIZE;
|
||||
do_hot_add = false;
|
||||
#endif
|
||||
dm_device.dev = dev;
|
||||
@ -2097,7 +2069,6 @@ static int balloon_suspend(struct hv_device *hv_dev)
|
||||
tasklet_enable(&hv_dev->channel->callback_event);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static int balloon_resume(struct hv_device *dev)
|
||||
@ -2156,7 +2127,6 @@ static struct hv_driver balloon_drv = {
|
||||
|
||||
static int __init init_balloon_drv(void)
|
||||
{
|
||||
|
||||
return vmbus_driver_register(&balloon_drv);
|
||||
}
|
||||
|
||||
|
@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
	return I2C_FUNC_SLAVE;
}

static const struct i2c_algorithm at91_twi_algorithm_slave = {
@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {

void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
	dev->functionality = I2C_FUNC_SLAVE;

	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
||||
|
@ -145,6 +145,7 @@ struct ad7173_device_info {
|
||||
unsigned int id;
|
||||
char *name;
|
||||
bool has_temp;
|
||||
bool has_input_buf;
|
||||
bool has_int_ref;
|
||||
bool has_ref2;
|
||||
u8 num_gpios;
|
||||
@ -212,18 +213,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
|
||||
.num_configs = 4,
|
||||
.num_gpios = 2,
|
||||
.has_temp = true,
|
||||
.has_input_buf = true,
|
||||
.has_int_ref = true,
|
||||
.clock = 2 * HZ_PER_MHZ,
|
||||
.sinc5_data_rates = ad7173_sinc5_data_rates,
|
||||
.num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
|
||||
},
|
||||
[ID_AD7172_4] = {
|
||||
.name = "ad7172-4",
|
||||
.id = AD7172_4_ID,
|
||||
.num_inputs = 9,
|
||||
.num_channels = 8,
|
||||
.num_configs = 8,
|
||||
.num_gpios = 4,
|
||||
.has_temp = false,
|
||||
.has_input_buf = true,
|
||||
.has_ref2 = true,
|
||||
.clock = 2 * HZ_PER_MHZ,
|
||||
.sinc5_data_rates = ad7173_sinc5_data_rates,
|
||||
@ -237,6 +241,7 @@ static const struct ad7173_device_info ad7173_device_info[] = {
|
||||
.num_configs = 8,
|
||||
.num_gpios = 4,
|
||||
.has_temp = true,
|
||||
.has_input_buf = true,
|
||||
.has_int_ref = true,
|
||||
.has_ref2 = true,
|
||||
.clock = 2 * HZ_PER_MHZ,
|
||||
@ -251,18 +256,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
|
||||
.num_configs = 4,
|
||||
.num_gpios = 2,
|
||||
.has_temp = true,
|
||||
.has_input_buf = true,
|
||||
.has_int_ref = true,
|
||||
.clock = 16 * HZ_PER_MHZ,
|
||||
.sinc5_data_rates = ad7175_sinc5_data_rates,
|
||||
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
|
||||
},
|
||||
[ID_AD7175_8] = {
|
||||
.name = "ad7175-8",
|
||||
.id = AD7175_8_ID,
|
||||
.num_inputs = 17,
|
||||
.num_channels = 16,
|
||||
.num_configs = 8,
|
||||
.num_gpios = 4,
|
||||
.has_temp = true,
|
||||
.has_input_buf = true,
|
||||
.has_int_ref = true,
|
||||
.has_ref2 = true,
|
||||
.clock = 16 * HZ_PER_MHZ,
|
||||
@ -277,18 +285,21 @@ static const struct ad7173_device_info ad7173_device_info[] = {
|
||||
.num_configs = 4,
|
||||
.num_gpios = 2,
|
||||
.has_temp = false,
|
||||
.has_input_buf = false,
|
||||
.has_int_ref = true,
|
||||
.clock = 16 * HZ_PER_MHZ,
|
||||
.sinc5_data_rates = ad7175_sinc5_data_rates,
|
||||
.num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
|
||||
},
|
||||
[ID_AD7177_2] = {
|
||||
.name = "ad7177-2",
|
||||
.id = AD7177_ID,
|
||||
.num_inputs = 5,
|
||||
.num_channels = 4,
|
||||
.num_configs = 4,
|
||||
.num_gpios = 2,
|
||||
.has_temp = true,
|
||||
.has_input_buf = true,
|
||||
.has_int_ref = true,
|
||||
.clock = 16 * HZ_PER_MHZ,
|
||||
.odr_start_value = AD7177_ODR_START_VALUE,
|
||||
@ -532,6 +543,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append)
|
||||
unsigned int interface_mode = st->interface_mode;
|
||||
int ret;
|
||||
|
||||
interface_mode &= ~AD7173_INTERFACE_DATA_STAT;
|
||||
interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append);
|
||||
ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode);
|
||||
if (ret)
|
||||
@ -705,7 +717,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
|
||||
{
|
||||
struct ad7173_state *st = iio_priv(indio_dev);
|
||||
struct ad7173_channel_config *cfg;
|
||||
unsigned int freq, i, reg;
|
||||
unsigned int freq, i;
|
||||
int ret;
|
||||
|
||||
ret = iio_device_claim_direct_mode(indio_dev);
|
||||
@ -721,16 +733,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
|
||||
|
||||
cfg = &st->channels[chan->address].cfg;
|
||||
cfg->odr = i;
|
||||
|
||||
if (!cfg->live)
|
||||
break;
|
||||
|
||||
ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, ®);
|
||||
if (ret)
|
||||
break;
|
||||
reg &= ~AD7173_FILTER_ODR0_MASK;
|
||||
reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i);
|
||||
ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg);
|
||||
cfg->live = false;
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -792,8 +795,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
|
||||
.type = IIO_VOLTAGE,
|
||||
.indexed = 1,
|
||||
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
|
||||
BIT(IIO_CHAN_INFO_SCALE),
|
||||
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
|
||||
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
|
||||
.scan_type = {
|
||||
.sign = 'u',
|
||||
.realbits = 24,
|
||||
@ -804,12 +806,11 @@ static const struct iio_chan_spec ad7173_channel_template = {
|
||||
|
||||
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
|
||||
.type = IIO_TEMP,
|
||||
.indexed = 1,
|
||||
.channel = AD7173_AIN_TEMP_POS,
|
||||
.channel2 = AD7173_AIN_TEMP_NEG,
|
||||
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
|
||||
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
|
||||
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
|
||||
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) |
|
||||
BIT(IIO_CHAN_INFO_SAMP_FREQ),
|
||||
.scan_type = {
|
||||
.sign = 'u',
|
||||
.realbits = 24,
|
||||
@ -932,7 +933,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
|
||||
AD7173_CH_ADDRESS(chan_arr[chan_index].channel,
|
||||
chan_arr[chan_index].channel2);
|
||||
chan_st_priv->cfg.bipolar = false;
|
||||
chan_st_priv->cfg.input_buf = true;
|
||||
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
|
||||
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
|
||||
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
|
||||
|
||||
@ -989,7 +990,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
|
||||
|
||||
chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
|
||||
chan_st_priv->chan_reg = chan_index;
|
||||
chan_st_priv->cfg.input_buf = true;
|
||||
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
|
||||
chan_st_priv->cfg.odr = 0;
|
||||
|
||||
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
|
||||
|
@ -243,11 +243,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
|
||||
}
|
||||
|
||||
static const struct iio_chan_spec ad9434_channels[] = {
|
||||
AD9467_CHAN(0, 0, 12, 'S'),
|
||||
AD9467_CHAN(0, 0, 12, 's'),
|
||||
};
|
||||
|
||||
static const struct iio_chan_spec ad9467_channels[] = {
|
||||
AD9467_CHAN(0, 0, 16, 'S'),
|
||||
AD9467_CHAN(0, 0, 16, 's'),
|
||||
};
|
||||
|
||||
static const struct ad9467_chip_info ad9467_chip_tbl = {
|
||||
|
@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
|
||||
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
|
||||
uint32_t period, bool fifo)
|
||||
{
|
||||
uint32_t mult;
|
||||
|
||||
/* when FIFO is on, prevent odr change if one is already pending */
|
||||
if (fifo && ts->new_mult != 0)
|
||||
return -EAGAIN;
|
||||
|
||||
ts->new_mult = period / ts->chip.clock_period;
|
||||
mult = period / ts->chip.clock_period;
|
||||
if (mult != ts->mult)
|
||||
ts->new_mult = mult;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
|
||||
s64 tmp = *val * (3767897513LL / 25LL);
|
||||
*val = div_s64_rem(tmp, 1000000000LL, val2);
|
||||
|
||||
return IIO_VAL_INT_PLUS_MICRO;
|
||||
return IIO_VAL_INT_PLUS_NANO;
|
||||
}
|
||||
|
||||
mutex_lock(&st->lock);
|
||||
|
@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
|
||||
&data->buffer.channels,
|
||||
ARRAY_SIZE(data->buffer.channels));
|
||||
if (ret)
|
||||
return IRQ_NONE;
|
||||
goto out;
|
||||
} else {
|
||||
for_each_set_bit(bit, indio_dev->active_scan_mask,
|
||||
BMI323_CHAN_MAX) {
|
||||
@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
|
||||
&data->buffer.channels[index++],
|
||||
BMI323_BYTES_PER_SAMPLE);
|
||||
if (ret)
|
||||
return IRQ_NONE;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
|
||||
iio_get_time_ns(indio_dev));
|
||||
|
||||
out:
|
||||
iio_trigger_notify_done(indio_dev->trig);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
|
@ -130,10 +130,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
|
||||
/* update data FIFO write */
|
||||
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
|
||||
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
ret = inv_icm42600_buffer_update_watermark(st);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&st->lock);
|
||||
|
@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
|
||||
latency_accel = period_accel * wm_accel;
|
||||
|
||||
/* 0 value for watermark means that the sensor is turned off */
|
||||
if (wm_gyro == 0 && wm_accel == 0)
|
||||
return 0;
|
||||
|
||||
if (latency_gyro == 0) {
|
||||
watermark = wm_accel;
|
||||
st->fifo.watermark.eff_accel = wm_accel;
|
||||
} else if (latency_accel == 0) {
|
||||
watermark = wm_gyro;
|
||||
st->fifo.watermark.eff_gyro = wm_gyro;
|
||||
} else {
|
||||
/* compute the smallest latency that is a multiple of both */
|
||||
if (latency_gyro <= latency_accel)
|
||||
@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
|
||||
watermark = latency / period;
|
||||
if (watermark < 1)
|
||||
watermark = 1;
|
||||
/* update effective watermark */
|
||||
st->fifo.watermark.eff_gyro = latency / period_gyro;
|
||||
if (st->fifo.watermark.eff_gyro < 1)
|
||||
st->fifo.watermark.eff_gyro = 1;
|
||||
st->fifo.watermark.eff_accel = latency / period_accel;
|
||||
if (st->fifo.watermark.eff_accel < 1)
|
||||
st->fifo.watermark.eff_accel = 1;
|
||||
}
|
||||
|
||||
/* compute watermark value in bytes */
|
||||
@ -514,7 +526,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
|
||||
/* handle gyroscope timestamp and FIFO data parsing */
|
||||
if (st->fifo.nb.gyro > 0) {
|
||||
ts = &gyro_st->ts;
|
||||
inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro,
|
||||
inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro,
|
||||
st->timestamp.gyro);
|
||||
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
|
||||
if (ret)
|
||||
@ -524,7 +536,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
|
||||
/* handle accelerometer timestamp and FIFO data parsing */
|
||||
if (st->fifo.nb.accel > 0) {
|
||||
ts = &accel_st->ts;
|
||||
inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel,
|
||||
inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel,
|
||||
st->timestamp.accel);
|
||||
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
|
||||
if (ret)
|
||||
@ -577,6 +589,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st)
|
||||
unsigned int val;
|
||||
int ret;
|
||||
|
||||
st->fifo.watermark.eff_gyro = 1;
|
||||
st->fifo.watermark.eff_accel = 1;
|
||||
|
||||
/*
|
||||
* Default FIFO configuration (bits 7 to 5)
|
||||
* - use invalid value
|
||||
|
@ -32,6 +32,8 @@ struct inv_icm42600_fifo {
|
||||
struct {
|
||||
unsigned int gyro;
|
||||
unsigned int accel;
|
||||
unsigned int eff_gyro;
|
||||
unsigned int eff_accel;
|
||||
} watermark;
|
||||
size_t count;
|
||||
struct {
|
||||
|
@ -537,6 +537,7 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
irq_type |= IRQF_ONESHOT;
|
||||
return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp,
|
||||
inv_icm42600_irq_handler, irq_type,
|
||||
"inv_icm42600", st);
|
||||
|
@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
|
||||
/* update data FIFO write */
|
||||
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
|
||||
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
ret = inv_icm42600_buffer_update_watermark(st);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&st->lock);
|
||||
|
@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
|
||||
goto end_session;
|
||||
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
|
||||
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
|
||||
inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp);
|
||||
inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0);
|
||||
inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp);
|
||||
inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0);
|
||||
|
||||
/* clear internal data buffer for avoiding kernel data leak */
|
||||
memset(data, 0, sizeof(data));
|
||||
|
@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type)
|
||||
if (!st->trig)
|
||||
return -ENOMEM;
|
||||
|
||||
irq_type |= IRQF_ONESHOT;
|
||||
ret = devm_request_threaded_irq(&indio_dev->dev, st->irq,
|
||||
&inv_mpu6050_interrupt_timestamp,
|
||||
&inv_mpu6050_interrupt_handle,
|
||||
|
@ -721,7 +721,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
|
||||
return ret;
|
||||
*val *= scale;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
} else {
|
||||
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
|
||||
if (ret < 0)
|
||||
|
@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
|
||||
|
||||
/*
|
||||
* Temperature is returned in Celsius degrees in fractional
|
||||
* form down 2^16. We rescale by x1000 to return milli Celsius
|
||||
* to respect IIO ABI.
|
||||
* form down 2^16. We rescale by x1000 to return millidegrees
|
||||
* Celsius to respect IIO ABI.
|
||||
*/
|
||||
*val = raw_temp * 1000;
|
||||
*val2 = 16;
|
||||
return IIO_VAL_FRACTIONAL_LOG2;
|
||||
raw_temp = sign_extend32(raw_temp, 23);
|
||||
*val = ((s64)raw_temp * 1000) / (1 << 16);
|
||||
return IIO_VAL_INT;
|
||||
}
|
||||
|
||||
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
|
||||
|
@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
|
||||
"failed to allocate regmap\n");
|
||||
|
||||
regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
|
||||
if (IS_ERR(regmap))
|
||||
return dev_err_probe(&client->dev, PTR_ERR(regmap),
|
||||
"failed to allocate regmap\n");
|
||||
if (IS_ERR(regmap_ee))
|
||||
return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
|
||||
"failed to allocate EEPROM regmap\n");
|
||||
|
||||
mlx90635 = iio_priv(indio_dev);
|
||||
i2c_set_clientdata(client, indio_dev);
|
||||
|
@ -3362,7 +3362,7 @@ int amd_iommu_reenable(int mode)
	return 0;
}

int __init amd_iommu_enable_faulting(unsigned int cpu)
int amd_iommu_enable_faulting(unsigned int cpu)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
	led_init_core(led_cdev);

#ifdef CONFIG_LEDS_TRIGGERS
	/*
	 * If no default trigger was given and hw_control_trigger is set,
	 * make it the default trigger.
	 */
	if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
		led_cdev->default_trigger = led_cdev->hw_control_trigger;
	led_trigger_set_default(led_cdev);
#endif

@ -210,6 +210,7 @@ static const struct regmap_access_table axp313a_volatile_table = {

static const struct regmap_range axp717_writeable_ranges[] = {
	regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
	regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
	regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
};
||||
|
@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
|
||||
|
||||
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
|
||||
GFP_KERNEL);
|
||||
if (!aux_bus->aux_device_wrapper[1])
|
||||
return -ENOMEM;
|
||||
if (!aux_bus->aux_device_wrapper[1]) {
|
||||
retval = -ENOMEM;
|
||||
goto err_aux_dev_add_0;
|
||||
}
|
||||
|
||||
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
|
||||
if (retval < 0)
|
||||
@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
|
||||
|
||||
err_aux_dev_add_1:
|
||||
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
|
||||
goto err_aux_dev_add_0;
|
||||
|
||||
err_aux_dev_init_1:
|
||||
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
|
||||
@ -120,6 +123,7 @@ err_ida_alloc_1:
|
||||
|
||||
err_aux_dev_add_0:
|
||||
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
|
||||
goto err_ret;
|
||||
|
||||
err_aux_dev_init_0:
|
||||
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
|
||||
@ -127,6 +131,7 @@ err_aux_dev_init_0:
|
||||
err_ida_alloc_0:
|
||||
kfree(aux_bus->aux_device_wrapper[0]);
|
||||
|
||||
err_ret:
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		cl_dbg(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}
@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
	}

	err = mei_restart(dev);
	if (err)
	if (err) {
		free_irq(pdev->irq, dev);
		return err;
	}

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);
||||
|
@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
|
||||
|
||||
static int mei_vsc_suspend(struct device *dev)
|
||||
{
|
||||
struct mei_device *mei_dev = dev_get_drvdata(dev);
|
||||
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
|
||||
struct mei_device *mei_dev;
|
||||
int ret = 0;
|
||||
|
||||
mei_stop(mei_dev);
|
||||
mei_dev = dev_get_drvdata(dev);
|
||||
if (!mei_dev)
|
||||
return -ENODEV;
|
||||
|
||||
mei_disable_interrupts(mei_dev);
|
||||
mutex_lock(&mei_dev->device_lock);
|
||||
|
||||
vsc_tp_free_irq(hw->tp);
|
||||
if (!mei_write_is_idle(mei_dev))
|
||||
ret = -EAGAIN;
|
||||
|
||||
return 0;
|
||||
mutex_unlock(&mei_dev->device_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mei_vsc_resume(struct device *dev)
|
||||
{
|
||||
struct mei_device *mei_dev = dev_get_drvdata(dev);
|
||||
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
|
||||
int ret;
|
||||
struct mei_device *mei_dev;
|
||||
|
||||
ret = vsc_tp_request_irq(hw->tp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mei_restart(mei_dev);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
/* start timer if stopped in suspend */
|
||||
schedule_delayed_work(&mei_dev->timer_work, HZ);
|
||||
mei_dev = dev_get_drvdata(dev);
|
||||
if (!mei_dev)
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
vsc_tp_free_irq(hw->tp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
|
||||
|
@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
|
||||
{
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
|
||||
union acpi_object obj = {
|
||||
.type = ACPI_TYPE_INTEGER,
|
||||
.integer.type = ACPI_TYPE_INTEGER,
|
||||
.integer.value = 1,
|
||||
};
|
||||
struct acpi_object_list arg_list = {
|
||||
|
@ -736,11 +736,6 @@ tx_done:
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags)) {
		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
		atomic_inc(&bp->ptp_cfg->tx_avail);
	}

	last_frag = i;

	/* start back at beginning and unmap skb */
@ -762,6 +757,10 @@ tx_dma_error:
tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (BNXT_TX_PTP_IS_SET(lflags)) {
		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
		atomic_inc(&bp->ptp_cfg->tx_avail);
	}
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@ -9004,6 +9003,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}
	bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);

hwrm_func_qcaps_exit:
	hwrm_req_drop(bp, req);
@ -15371,6 +15371,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
	if (bp->tso_max_segs)
		netif_set_tso_max_segs(dev, bp->tso_max_segs);

	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			    NETDEV_XDP_ACT_RX_SG;
Some files were not shown because too many files have changed in this diff