commit 446bf64b61
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge conflict of mlx5 resolved using instructions in merge
commit 9566e650bf.

Signed-off-by: David S. Miller <davem@davemloft.net>
@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
 802       E802 protocol         ax25        AX25
 ethernet  Ethernet protocol     rose        X.25 PLP layer
 ipv4      IP version 4          x25         X.25 protocol
 ipx       IPX                   token-ring  IBM token ring
 bridge    Bridging              decnet      DEC net
 ipv6      IP version 6          tipc        TIPC
 ========= =================== = ==========  ==================
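Purely as an illustration of how the tunables in these subdirectories are consumed from user space, here is a minimal sketch that reads one well-known entry under the ipv4 subdirectory listed above (the file name is just an example):

#include <stdio.h>

/* Read a single tunable under /proc/sys/net (ipv4/ip_forward as an example). */
int main(void)
{
	char value[64];
	FILE *f = fopen("/proc/sys/net/ipv4/ip_forward", "r");

	if (!f) {
		perror("open");
		return 1;
	}
	if (fgets(value, sizeof(value), f))
		printf("net.ipv4.ip_forward = %s", value);
	fclose(f);
	return 0;
}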
@ -401,33 +400,7 @@ interface.
(network) that the route leads to, the router (may be directly connected), the
route flags, and the device the route is using.

5. IPX
------

The IPX protocol has no tunable values in proc/sys/net.

The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
socket giving the local and remote addresses in Novell format (that is
network:node:port). In accordance with the strange Novell tradition,
everything but the port is in hex. Not_Connected is displayed for sockets that
are not tied to a specific remote address. The Tx and Rx queue sizes indicate
the number of bytes pending for transmission and reception. The state
indicates the state the socket is in and the uid is the owning uid of the
socket.

The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
it gives the network number, the node number, and indicates if the network is
the primary network. It also indicates which device it is bound to (or
Internal for internal networks) and the Frame Type if appropriate. Linux
supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
IPX.

The /proc/net/ipx_route table holds a list of IPX routes. For each route it
gives the destination network, the router node (or Directly) and the network
address of the router (or Connected) for internal networks.

6. TIPC
5. TIPC
-------

tipc_rmem

@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA  $@

DT_DOCS = $(shell \
	cd $(srctree)/$(src) && \
	find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
	find * \( -name '*.yaml' ! \
		-name $(DT_TMP_SCHEMA) ! \
		-name '*.example.dt.yaml' \) \
	)

DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))

@ -7,18 +7,6 @@ Required properties:
- phy-mode : See ethernet.txt file in the same directory

Optional properties:
- phy-reset-gpios : Should specify the gpio for phy reset
- phy-reset-duration : Reset duration in milliseconds. Should present
  only if property "phy-reset-gpios" is available. Missing the property
  will have the duration be 1 millisecond. Numbers greater than 1000 are
  invalid and 1 millisecond will be used instead.
- phy-reset-active-high : If present then the reset sequence using the GPIO
  specified in the "phy-reset-gpios" property is reversed (H=reset state,
  L=operation state).
- phy-reset-post-delay : Post reset delay in milliseconds. If present then
  a delay of phy-reset-post-delay milliseconds will be observed after the
  phy-reset-gpios has been toggled. Can be omitted thus no delay is
  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
- phy-supply : regulator that powers the Ethernet PHY.
- phy-handle : phandle to the PHY device connected to this device.
- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@ -47,11 +35,27 @@ Optional properties:
  For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
  per second interrupt associated with 1588 precision time protocol(PTP).

Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
  according to phy.txt in the same directory

Deprecated optional properties:
  To avoid these, create a phy node according to phy.txt in the same
  directory, and point the fec's "phy-handle" property to it. Then use
  the phy's reset binding, again described by phy.txt.
- phy-reset-gpios : Should specify the gpio for phy reset
- phy-reset-duration : Reset duration in milliseconds. Should present
  only if property "phy-reset-gpios" is available. Missing the property
  will have the duration be 1 millisecond. Numbers greater than 1000 are
  invalid and 1 millisecond will be used instead.
- phy-reset-active-high : If present then the reset sequence using the GPIO
  specified in the "phy-reset-gpios" property is reversed (H=reset state,
  L=operation state).
- phy-reset-post-delay : Post reset delay in milliseconds. If present then
  a delay of phy-reset-post-delay milliseconds will be observed after the
  phy-reset-gpios has been toggled. Can be omitted thus no delay is
  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.

Example:

ethernet@83fec000 {

@ -37,7 +37,8 @@ properties:
  hwlocks: true

  st,syscfg:
    $ref: "/schemas/types.yaml#/definitions/phandle-array"
    allOf:
      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
    description: Should be phandle/offset/mask
    items:
      - description: Phandle to the syscon node which includes IRQ mux selection.

@ -1,162 +0,0 @@
|
||||
===================
|
||||
RISC-V CPU Bindings
|
||||
===================
|
||||
|
||||
The device tree allows to describe the layout of CPUs in a system through
|
||||
the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
|
||||
defining properties for every cpu.
|
||||
|
||||
Bindings for CPU nodes follow the Devicetree Specification, available from:
|
||||
|
||||
https://www.devicetree.org/specifications/
|
||||
|
||||
with updates for 32-bit and 64-bit RISC-V systems provided in this document.
|
||||
|
||||
===========
|
||||
Terminology
|
||||
===========
|
||||
|
||||
This document uses some terminology common to the RISC-V community that is not
|
||||
widely used, the definitions of which are listed here:
|
||||
|
||||
* hart: A hardware execution context, which contains all the state mandated by
|
||||
the RISC-V ISA: a PC and some registers. This terminology is designed to
|
||||
disambiguate software's view of execution contexts from any particular
|
||||
microarchitectural implementation strategy. For example, my Intel laptop is
|
||||
described as having one socket with two cores, each of which has two hyper
|
||||
threads. Therefore this system has four harts.
|
||||
|
||||
=====================================
|
||||
cpus and cpu node bindings definition
|
||||
=====================================
|
||||
|
||||
The RISC-V architecture, in accordance with the Devicetree Specification,
|
||||
requires the cpus and cpu nodes to be present and contain the properties
|
||||
described below.
|
||||
|
||||
- cpus node
|
||||
|
||||
Description: Container of cpu nodes
|
||||
|
||||
The node name must be "cpus".
|
||||
|
||||
A cpus node must define the following properties:
|
||||
|
||||
- #address-cells
|
||||
Usage: required
|
||||
Value type: <u32>
|
||||
Definition: must be set to 1
|
||||
- #size-cells
|
||||
Usage: required
|
||||
Value type: <u32>
|
||||
Definition: must be set to 0
|
||||
|
||||
- cpu node
|
||||
|
||||
Description: Describes a hart context
|
||||
|
||||
PROPERTIES
|
||||
|
||||
- device_type
|
||||
Usage: required
|
||||
Value type: <string>
|
||||
Definition: must be "cpu"
|
||||
- reg
|
||||
Usage: required
|
||||
Value type: <u32>
|
||||
Definition: The hart ID of this CPU node
|
||||
- compatible:
|
||||
Usage: required
|
||||
Value type: <stringlist>
|
||||
Definition: must contain "riscv", may contain one of
|
||||
"sifive,rocket0"
|
||||
- mmu-type:
|
||||
Usage: optional
|
||||
Value type: <string>
|
||||
Definition: Specifies the CPU's MMU type. Possible values are
|
||||
"riscv,sv32"
|
||||
"riscv,sv39"
|
||||
"riscv,sv48"
|
||||
- riscv,isa:
|
||||
Usage: required
|
||||
Value type: <string>
|
||||
Definition: Contains the RISC-V ISA string of this hart. These
|
||||
ISA strings are defined by the RISC-V ISA manual.
|
||||
|
||||
Example: SiFive Freedom U540G Development Kit
|
||||
---------------------------------------------
|
||||
|
||||
This system contains two harts: a hart marked as disabled that's used for
|
||||
low-level system tasks and should be ignored by Linux, and a second hart that
|
||||
Linux is allowed to run on.
|
||||
|
||||
cpus {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
timebase-frequency = <1000000>;
|
||||
cpu@0 {
|
||||
clock-frequency = <1600000000>;
|
||||
compatible = "sifive,rocket0", "riscv";
|
||||
device_type = "cpu";
|
||||
i-cache-block-size = <64>;
|
||||
i-cache-sets = <128>;
|
||||
i-cache-size = <16384>;
|
||||
next-level-cache = <&L15 &L0>;
|
||||
reg = <0>;
|
||||
riscv,isa = "rv64imac";
|
||||
status = "disabled";
|
||||
L10: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
interrupt-controller;
|
||||
};
|
||||
};
|
||||
cpu@1 {
|
||||
clock-frequency = <1600000000>;
|
||||
compatible = "sifive,rocket0", "riscv";
|
||||
d-cache-block-size = <64>;
|
||||
d-cache-sets = <64>;
|
||||
d-cache-size = <32768>;
|
||||
d-tlb-sets = <1>;
|
||||
d-tlb-size = <32>;
|
||||
device_type = "cpu";
|
||||
i-cache-block-size = <64>;
|
||||
i-cache-sets = <64>;
|
||||
i-cache-size = <32768>;
|
||||
i-tlb-sets = <1>;
|
||||
i-tlb-size = <32>;
|
||||
mmu-type = "riscv,sv39";
|
||||
next-level-cache = <&L15 &L0>;
|
||||
reg = <1>;
|
||||
riscv,isa = "rv64imafdc";
|
||||
status = "okay";
|
||||
tlb-split;
|
||||
L13: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
interrupt-controller;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
Example: Spike ISA Simulator with 1 Hart
|
||||
----------------------------------------
|
||||
|
||||
This device tree matches the Spike ISA golden model as run with `spike -p1`.
|
||||
|
||||
cpus {
|
||||
cpu@0 {
|
||||
device_type = "cpu";
|
||||
reg = <0x00000000>;
|
||||
status = "okay";
|
||||
compatible = "riscv";
|
||||
riscv,isa = "rv64imafdc";
|
||||
mmu-type = "riscv,sv48";
|
||||
clock-frequency = <0x3b9aca00>;
|
||||
interrupt-controller {
|
||||
#interrupt-cells = <0x00000001>;
|
||||
interrupt-controller;
|
||||
compatible = "riscv,cpu-intc";
|
||||
}
|
||||
}
|
||||
}
|
@ -10,6 +10,18 @@ maintainers:
  - Paul Walmsley <paul.walmsley@sifive.com>
  - Palmer Dabbelt <palmer@sifive.com>

description: |
  This document uses some terminology common to the RISC-V community
  that is not widely used, the definitions of which are listed here:

  hart: A hardware execution context, which contains all the state
  mandated by the RISC-V ISA: a PC and some registers. This
  terminology is designed to disambiguate software's view of execution
  contexts from any particular microarchitectural implementation
  strategy. For example, an Intel laptop containing one socket with
  two cores, each of which has two hyperthreads, could be described as
  having four harts.

properties:
  compatible:
    items:
@ -50,6 +62,10 @@ properties:
          User-Level ISA document, available from
          https://riscv.org/specifications/

          While the isa strings in ISA specification are case
          insensitive, letters in the riscv,isa string must be all
          lowercase to simplify parsing.

  timebase-frequency:
    type: integer
    minimum: 1

@ -19,7 +19,7 @@ properties:
  compatible:
    items:
      - enum:
          - sifive,freedom-unleashed-a00
          - sifive,hifive-unleashed-a00
      - const: sifive,fu540-c000
      - const: sifive,fu540
...

@ -13,7 +13,8 @@ a) SMB3 (and SMB3.1.1) missing optional features:
- T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
currently the only two server side copy mechanisms supported)

b) improved sparse file support
b) improved sparse file support (fiemap and SEEK_HOLE are implemented
but additional features would be supportable by the protocol).

c) Directory entry caching relies on a 1 second timer, rather than
using Directory Leases, currently only the root file handle is cached longer
@ -21,9 +22,13 @@ using Directory Leases, currently only the root file handle is cached longer
d) quota support (needs minor kernel change since quota calls
to make it to network filesystems or deviceless filesystems)

e) Additional use cases where we use "compoounding" (e.g. open/query/close
and open/setinfo/close) to reduce the number of roundtrips, and also
open to reduce redundant opens (using deferred close and reference counts more).
e) Additional use cases can be optimized to use "compounding"
(e.g. open/query/close and open/setinfo/close) to reduce the number
of roundtrips to the server and improve performance. Various cases
(stat, statfs, create, unlink, mkdir) already have been improved by
using compounding but more can be done. In addition we could significantly
reduce redundant opens by using deferred close (with handle caching leases)
and better using reference counters on file handles.

f) Finish inotify support so kde and gnome file list windows
will autorefresh (partially complete by Asser). Needs minor kernel
@ -43,18 +48,17 @@ mount or a per server basis to client UIDs or nobody if no mapping
exists. Also better integration with winbind for resolving SID owners

k) Add tools to take advantage of more smb3 specific ioctls and features
(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
is in progress, and a passthrough query_info call is already implemented
in cifs.ko to allow smb3 info levels queries to be sent from userspace)
(passthrough ioctl/fsctl is now implemented in cifs.ko to allow sending
various SMB3 fsctls and query info and set info calls directly from user space)
Add tools to make setting various non-POSIX metadata attributes easier
from tools (e.g. extending what was done in smb-info tool).

l) encrypted file support

m) improved stats gathering tools (perhaps integration with nfsometer?)
to extend and make easier to use what is currently in /proc/fs/cifs/Stats

n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed
file attribute via chflags) and improve user space tools for managing and
viewing them.
n) Add support for claims based ACLs ("DAC")

o) mount helper GUI (to simplify the various configuration options on mount)

@ -82,6 +86,8 @@ so far).
w) Add support for additional strong encryption types, and additional spnego
authentication mechanisms (see MS-SMB2)

x) Finish support for SMB3.1.1 compression

KNOWN BUGS
====================================
See http://bugzilla.samba.org - search on product "CifsVFS" for

@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
These flags will be acted upon accordingly by the core ``ktls`` code.
TLS device feature flags only control adding of new TLS connection
offloads, old connections will remain active after flags are cleared.

Known bugs
==========

skb_orphan() leaks clear text
-----------------------------

Currently drivers depend on the :c:member:`sk` member of
:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
encryption. Any operation which removes or does not preserve the socket
association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
will cause the driver to miss the packets and lead to clear text leaks.

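The driver-side check this bug describes looks roughly like the sketch below. It is a minimal illustration, not any particular driver's code; the demo_* name is ours, and it assumes the tls_is_sk_tx_device_offloaded() accessor provided by the kernel TLS code:

#include <linux/skbuff.h>
#include <net/tls.h>

static bool demo_skb_needs_tls_offload(struct sk_buff *skb)
{
	/*
	 * skb->sk is the only link back to the offloaded TLS socket. If it
	 * has been cleared (e.g. by skb_orphan()) or not carried over (e.g.
	 * by skb_clone()), this returns false and the segment would be sent
	 * out unencrypted - the clear text leak described above.
	 */
	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}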
Redirects leak clear text
-------------------------

In the RX direction, if segment has already been decrypted by the device
and it gets redirected or mirrored - clear text will be transmitted out.

@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
media, receives them from user space program and instead of sending
packets via physical media sends them to the user space program.

Let's say that you configured IPX on the tap0, then whenever
the kernel sends an IPX packet to tap0, it is passed to the application
Let's say that you configured IPv6 on the tap0, then whenever
the kernel sends an IPv6 packet to tap0, it is passed to the application
(VTun for example). The application encrypts, compresses and sends it to
the other side over TCP or UDP. The application on the other side decompresses
and decrypts the data received and writes the packet to the TAP device,

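For reference, a minimal user-space sketch of the attach-and-read side described above, using the standard /dev/net/tun interface (the function name and interface name are arbitrary):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Attach to a TAP interface and read one Ethernet frame from the kernel. */
int tap_read_one(const char *ifname, char *frame, int len)
{
	struct ifreq ifr;
	int fd, n;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	/* raw Ethernet frames, no extra header */
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	/* Each read() returns one frame the kernel queued on the device. */
	n = read(fd, frame, len);
	close(fd);
	return n;
}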
MAINTAINERS
@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
|
||||
M: Heiner Kallweit <hkallweit1@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/ethernet/realtek/r8169.c
|
||||
F: drivers/net/ethernet/realtek/r8169*
|
||||
|
||||
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
|
||||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
@ -6078,7 +6078,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
|
||||
M: Heiner Kallweit <hkallweit1@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/ABI/testing/sysfs-bus-mdio
|
||||
F: Documentation/ABI/testing/sysfs-class-net-phydev
|
||||
F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
|
||||
F: Documentation/devicetree/bindings/net/mdio*
|
||||
F: Documentation/networking/phy.rst
|
||||
@ -6357,7 +6357,7 @@ FPGA MANAGER FRAMEWORK
|
||||
M: Moritz Fischer <mdf@kernel.org>
|
||||
L: linux-fpga@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
|
||||
Q: http://patchwork.kernel.org/project/linux-fpga/list/
|
||||
F: Documentation/fpga/
|
||||
F: Documentation/driver-api/fpga/
|
||||
@ -6390,7 +6390,7 @@ FRAMEBUFFER LAYER
|
||||
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
L: linux-fbdev@vger.kernel.org
|
||||
T: git git://github.com/bzolnier/linux.git
|
||||
T: git git://anongit.freedesktop.org/drm/drm-misc
|
||||
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
|
||||
S: Maintained
|
||||
F: Documentation/fb/
|
||||
@ -6454,6 +6454,14 @@ S: Maintained
|
||||
F: drivers/perf/fsl_imx8_ddr_perf.c
|
||||
F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
|
||||
|
||||
FREESCALE IMX I2C DRIVER
|
||||
M: Oleksij Rempel <o.rempel@pengutronix.de>
|
||||
R: Pengutronix Kernel Team <kernel@pengutronix.de>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/i2c/busses/i2c-imx.c
|
||||
F: Documentation/devicetree/bindings/i2c/i2c-imx.txt
|
||||
|
||||
FREESCALE IMX LPI2C DRIVER
|
||||
M: Dong Aisheng <aisheng.dong@nxp.com>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
@ -7465,7 +7473,7 @@ F: drivers/net/hyperv/
|
||||
F: drivers/scsi/storvsc_drv.c
|
||||
F: drivers/uio/uio_hv_generic.c
|
||||
F: drivers/video/fbdev/hyperv_fb.c
|
||||
F: drivers/iommu/hyperv_iommu.c
|
||||
F: drivers/iommu/hyperv-iommu.c
|
||||
F: net/vmw_vsock/hyperv_transport.c
|
||||
F: include/clocksource/hyperv_timer.h
|
||||
F: include/linux/hyperv.h
|
||||
@ -8055,6 +8063,7 @@ S: Maintained
|
||||
F: drivers/video/fbdev/i810/
|
||||
|
||||
INTEL ASoC DRIVERS
|
||||
M: Cezary Rojewski <cezary.rojewski@intel.com>
|
||||
M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
|
||||
M: Liam Girdwood <liam.r.girdwood@linux.intel.com>
|
||||
M: Jie Yang <yang.jie@linux.intel.com>
|
||||
@ -8076,6 +8085,13 @@ T: git git://git.code.sf.net/p/intel-sas/isci
|
||||
S: Supported
|
||||
F: drivers/scsi/isci/
|
||||
|
||||
INTEL CPU family model numbers
|
||||
M: Tony Luck <tony.luck@intel.com>
|
||||
M: x86@kernel.org
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Supported
|
||||
F: arch/x86/include/asm/intel-family.h
|
||||
|
||||
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
|
||||
M: Jani Nikula <jani.nikula@linux.intel.com>
|
||||
M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
|
||||
@ -8427,7 +8443,6 @@ L: linux-xfs@vger.kernel.org
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
|
||||
S: Supported
|
||||
F: fs/iomap.c
|
||||
F: fs/iomap/
|
||||
F: include/linux/iomap.h
|
||||
|
||||
@ -16097,7 +16112,7 @@ S: Maintained
|
||||
F: drivers/net/ethernet/ti/netcp*
|
||||
|
||||
TI PCM3060 ASoC CODEC DRIVER
|
||||
M: Kirill Marinushkin <kmarinushkin@birdec.tech>
|
||||
M: Kirill Marinushkin <kmarinushkin@birdec.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/pcm3060.txt
|
||||
|
Makefile
@ -2,7 +2,7 @@
|
||||
VERSION = 5
|
||||
PATCHLEVEL = 3
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc3
|
||||
EXTRAVERSION = -rc5
|
||||
NAME = Bobtail Squid
|
||||
|
||||
# *DOCUMENTATION*
|
||||
@ -419,6 +419,7 @@ NM = $(CROSS_COMPILE)nm
|
||||
STRIP = $(CROSS_COMPILE)strip
|
||||
OBJCOPY = $(CROSS_COMPILE)objcopy
|
||||
OBJDUMP = $(CROSS_COMPILE)objdump
|
||||
OBJSIZE = $(CROSS_COMPILE)size
|
||||
PAHOLE = pahole
|
||||
LEX = flex
|
||||
YACC = bison
|
||||
@ -475,9 +476,9 @@ GCC_PLUGINS_CFLAGS :=
|
||||
CLANG_FLAGS :=
|
||||
|
||||
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
|
||||
export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
|
||||
export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
|
||||
export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
|
||||
export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
|
||||
export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
|
||||
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
|
||||
|
||||
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
|
||||
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
|
||||
@ -845,7 +846,7 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
|
||||
KBUILD_CFLAGS += -Wdeclaration-after-statement
|
||||
|
||||
# Warn about unmarked fall-throughs in switch statement.
|
||||
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=3,)
|
||||
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
|
||||
|
||||
# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
|
||||
KBUILD_CFLAGS += -Wvla
|
||||
@ -1002,6 +1003,8 @@ endif
|
||||
|
||||
PHONY += prepare0
|
||||
|
||||
export MODORDER := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order
|
||||
|
||||
ifeq ($(KBUILD_EXTMOD),)
|
||||
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
|
||||
|
||||
@ -1771,13 +1774,22 @@ build-dir = $(patsubst %/,%,$(dir $(build-target)))
|
||||
$(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
|
||||
%.symtypes: prepare FORCE
|
||||
$(Q)$(MAKE) $(build)=$(build-dir) $(build-target)
|
||||
ifeq ($(KBUILD_EXTMOD),)
|
||||
# For the single build of an in-tree module, use a temporary file to avoid
|
||||
# the situation of modules_install installing an invalid modules.order.
|
||||
%.ko: MODORDER := .modules.tmp
|
||||
endif
|
||||
%.ko: prepare FORCE
|
||||
$(Q)$(MAKE) $(build)=$(build-dir) $(build-target:.ko=.mod)
|
||||
$(Q)echo $(build-target) > $(MODORDER)
|
||||
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
|
||||
|
||||
# Modules
|
||||
PHONY += /
|
||||
/: ./
|
||||
|
||||
%/: prepare FORCE
|
||||
$(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir)
|
||||
$(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) need-modorder=1
|
||||
|
||||
# FIXME Should go into a make.lib or something
|
||||
# ===========================================================================
|
||||
|
@ -544,6 +544,7 @@ static int arch_build_bp_info(struct perf_event *bp,
|
||||
if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
|
||||
&& max_watchpoint_len >= 8)
|
||||
break;
|
||||
/* Else, fall through */
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -608,10 +609,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
|
||||
/* Allow halfword watchpoints and breakpoints. */
|
||||
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
|
||||
break;
|
||||
/* Else, fall through */
|
||||
case 3:
|
||||
/* Allow single byte watchpoint. */
|
||||
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
|
||||
break;
|
||||
/* Else, fall through */
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
@ -861,6 +864,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
|
||||
break;
|
||||
case ARM_ENTRY_ASYNC_WATCHPOINT:
|
||||
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
|
||||
/* Fall through */
|
||||
case ARM_ENTRY_SYNC_WATCHPOINT:
|
||||
watchpoint_handler(addr, fsr, regs);
|
||||
break;
|
||||
@ -909,6 +913,7 @@ static bool core_has_os_save_restore(void)
|
||||
ARM_DBG_READ(c1, c1, 4, oslsr);
|
||||
if (oslsr & ARM_OSLSR_OSLM0)
|
||||
return true;
|
||||
/* Else, fall through */
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
@ -596,6 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
|
||||
switch (retval) {
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
restart -= 2;
|
||||
/* Fall through */
|
||||
case -ERESTARTNOHAND:
|
||||
case -ERESTARTSYS:
|
||||
case -ERESTARTNOINTR:
|
||||
|
@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
}
|
||||
|
||||
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_reg *table, size_t num)
|
||||
const struct coproc_reg *table, size_t num,
|
||||
unsigned long *bmap)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < num; i++)
|
||||
if (table[i].reset)
|
||||
if (table[i].reset) {
|
||||
int reg = table[i].reg;
|
||||
|
||||
table[i].reset(vcpu, &table[i]);
|
||||
if (reg > 0 && reg < NR_CP15_REGS) {
|
||||
set_bit(reg, bmap);
|
||||
if (table[i].is_64bit)
|
||||
set_bit(reg + 1, bmap);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
|
||||
@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
size_t num;
|
||||
const struct coproc_reg *table;
|
||||
|
||||
/* Catch someone adding a register without putting in reset entry. */
|
||||
memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
|
||||
DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
|
||||
|
||||
/* Generic chip reset first (so target could override). */
|
||||
reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
|
||||
reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
|
||||
|
||||
table = get_target_table(vcpu->arch.target, &num);
|
||||
reset_coproc_regs(vcpu, table, num);
|
||||
reset_coproc_regs(vcpu, table, num, bmap);
|
||||
|
||||
for (num = 1; num < NR_CP15_REGS; num++)
|
||||
WARN(vcpu_cp15(vcpu, num) == 0x42424242,
|
||||
WARN(!test_bit(num, bmap),
|
||||
"Didn't reset vcpu_cp15(vcpu, %zi)", num);
|
||||
}
|
||||
|
@ -49,6 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
|
||||
* FALLTHROUGH: Ensure we don't try to overwrite our newly
|
||||
* initialised state information on the first fault.
|
||||
*/
|
||||
/* Fall through */
|
||||
|
||||
case THREAD_NOTIFY_EXIT:
|
||||
crunch_task_release(thread);
|
||||
|
@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void)
|
||||
switch (err) {
|
||||
case -ENOSYS:
|
||||
tegra_cpu_reset_handler_set(reset_address);
|
||||
/* pass-through */
|
||||
/* fall through */
|
||||
case 0:
|
||||
is_enabled = true;
|
||||
break;
|
||||
|
@ -695,7 +695,7 @@ thumb2arm(u16 tinstr)
|
||||
return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
|
||||
(tinstr & 255); /* register_list */
|
||||
}
|
||||
/* Else fall through for illegal instruction case */
|
||||
/* Else, fall through - for illegal instruction case */
|
||||
|
||||
default:
|
||||
return BAD_INSTR;
|
||||
@ -751,6 +751,8 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
|
||||
case 0xe8e0:
|
||||
case 0xe9e0:
|
||||
poffset->un = (tinst2 & 0xff) << 2;
|
||||
/* Fall through */
|
||||
|
||||
case 0xe940:
|
||||
case 0xe9c0:
|
||||
return do_alignment_ldrdstrd;
|
||||
|
@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
|
||||
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if (!dev_is_dma_coherent(dev))
|
||||
return __get_dma_pgprot(attrs, prot);
|
||||
return prot;
|
||||
return __get_dma_pgprot(attrs, prot);
|
||||
}
|
||||
|
||||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
|
@ -388,17 +388,15 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
|
||||
/*
|
||||
* not supported by current hardware on OMAP1
|
||||
* w |= (0x03 << 7);
|
||||
* fall through
|
||||
*/
|
||||
/* fall through */
|
||||
case OMAP_DMA_DATA_BURST_16:
|
||||
if (dma_omap2plus()) {
|
||||
burst = 0x3;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* OMAP1 don't support burst 16
|
||||
* fall through
|
||||
*/
|
||||
/* OMAP1 don't support burst 16 */
|
||||
/* fall through */
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
@ -474,10 +472,8 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
|
||||
burst = 0x3;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* OMAP1 don't support burst 16
|
||||
* fall through
|
||||
*/
|
||||
/* OMAP1 don't support burst 16 */
|
||||
/* fall through */
|
||||
default:
|
||||
printk(KERN_ERR "Invalid DMA burst mode\n");
|
||||
BUG();
|
||||
|
@ -316,9 +316,10 @@
|
||||
|
||||
#define kvm_arm_exception_class \
|
||||
ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
|
||||
ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \
|
||||
ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \
|
||||
ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
|
||||
ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
|
||||
ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
|
||||
ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
|
||||
ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
|
||||
ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
|
||||
ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
|
||||
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
|
||||
|
@ -209,7 +209,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
|
||||
|
||||
static inline pte_t pte_mkdevmap(pte_t pte)
|
||||
{
|
||||
return set_pte_bit(pte, __pgprot(PTE_DEVMAP));
|
||||
return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
|
||||
}
|
||||
|
||||
static inline void set_pte(pte_t *ptep, pte_t pte)
|
||||
@ -396,7 +396,10 @@ static inline int pmd_protnone(pmd_t pmd)
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
|
||||
#endif
|
||||
#define pmd_mkdevmap(pmd) pte_pmd(pte_mkdevmap(pmd_pte(pmd)))
|
||||
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
|
||||
{
|
||||
return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
|
||||
}
|
||||
|
||||
#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
|
||||
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
|
||||
|
@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
|
||||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
|
||||
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
|
||||
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
|
||||
/*
|
||||
* We already refuse to boot CPUs that don't support our configured
|
||||
* page size, so we can only detect mismatches for a page size other
|
||||
* than the one we're currently using. Unfortunately, SoCs like this
|
||||
* exist in the wild so, even though we don't like it, we'll have to go
|
||||
* along with it and treat them as non-strict.
|
||||
*/
|
||||
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
|
||||
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
|
||||
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
|
||||
/* Linux shouldn't care about secure memory */
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
|
||||
|
@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
|
||||
if (offset < -SZ_128M || offset >= SZ_128M) {
|
||||
#ifdef CONFIG_ARM64_MODULE_PLTS
|
||||
struct plt_entry trampoline;
|
||||
struct plt_entry trampoline, *dst;
|
||||
struct module *mod;
|
||||
|
||||
/*
|
||||
@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
* to check if the actual opcodes are in fact identical,
|
||||
* regardless of the offset in memory so use memcmp() instead.
|
||||
*/
|
||||
trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
|
||||
if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
|
||||
sizeof(trampoline))) {
|
||||
if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
|
||||
dst = mod->arch.ftrace_trampoline;
|
||||
trampoline = get_plt_entry(addr, dst);
|
||||
if (memcmp(dst, &trampoline, sizeof(trampoline))) {
|
||||
if (plt_entry_is_initialized(dst)) {
|
||||
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* point the trampoline to our ftrace entry point */
|
||||
module_disable_ro(mod);
|
||||
*mod->arch.ftrace_trampoline = trampoline;
|
||||
*dst = trampoline;
|
||||
module_enable_ro(mod, true);
|
||||
|
||||
/* update trampoline before patching in the branch */
|
||||
smp_wmb();
|
||||
/*
|
||||
* Ensure updated trampoline is visible to instruction
|
||||
* fetch before we patch in the branch.
|
||||
*/
|
||||
__flush_icache_range((unsigned long)&dst[0],
|
||||
(unsigned long)&dst[1]);
|
||||
}
|
||||
addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
|
||||
addr = (unsigned long)dst;
|
||||
#else /* CONFIG_ARM64_MODULE_PLTS */
|
||||
return -EINVAL;
|
||||
#endif /* CONFIG_ARM64_MODULE_PLTS */
|
||||
|
@ -733,6 +733,7 @@ static const char *esr_class_str[] = {
|
||||
[ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
|
||||
[ESR_ELx_EC_FP_ASIMD] = "ASIMD",
|
||||
[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
|
||||
[ESR_ELx_EC_PAC] = "PAC",
|
||||
[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
|
||||
[ESR_ELx_EC_ILL] = "PSTATE.IL",
|
||||
[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
|
||||
|
@ -18,40 +18,70 @@
|
||||
#define save_debug(ptr,reg,nr) \
|
||||
switch (nr) { \
|
||||
case 15: ptr[15] = read_debug(reg, 15); \
|
||||
/* Fall through */ \
|
||||
case 14: ptr[14] = read_debug(reg, 14); \
|
||||
/* Fall through */ \
|
||||
case 13: ptr[13] = read_debug(reg, 13); \
|
||||
/* Fall through */ \
|
||||
case 12: ptr[12] = read_debug(reg, 12); \
|
||||
/* Fall through */ \
|
||||
case 11: ptr[11] = read_debug(reg, 11); \
|
||||
/* Fall through */ \
|
||||
case 10: ptr[10] = read_debug(reg, 10); \
|
||||
/* Fall through */ \
|
||||
case 9: ptr[9] = read_debug(reg, 9); \
|
||||
/* Fall through */ \
|
||||
case 8: ptr[8] = read_debug(reg, 8); \
|
||||
/* Fall through */ \
|
||||
case 7: ptr[7] = read_debug(reg, 7); \
|
||||
/* Fall through */ \
|
||||
case 6: ptr[6] = read_debug(reg, 6); \
|
||||
/* Fall through */ \
|
||||
case 5: ptr[5] = read_debug(reg, 5); \
|
||||
/* Fall through */ \
|
||||
case 4: ptr[4] = read_debug(reg, 4); \
|
||||
/* Fall through */ \
|
||||
case 3: ptr[3] = read_debug(reg, 3); \
|
||||
/* Fall through */ \
|
||||
case 2: ptr[2] = read_debug(reg, 2); \
|
||||
/* Fall through */ \
|
||||
case 1: ptr[1] = read_debug(reg, 1); \
|
||||
/* Fall through */ \
|
||||
default: ptr[0] = read_debug(reg, 0); \
|
||||
}
|
||||
|
||||
#define restore_debug(ptr,reg,nr) \
|
||||
switch (nr) { \
|
||||
case 15: write_debug(ptr[15], reg, 15); \
|
||||
/* Fall through */ \
|
||||
case 14: write_debug(ptr[14], reg, 14); \
|
||||
/* Fall through */ \
|
||||
case 13: write_debug(ptr[13], reg, 13); \
|
||||
/* Fall through */ \
|
||||
case 12: write_debug(ptr[12], reg, 12); \
|
||||
/* Fall through */ \
|
||||
case 11: write_debug(ptr[11], reg, 11); \
|
||||
/* Fall through */ \
|
||||
case 10: write_debug(ptr[10], reg, 10); \
|
||||
/* Fall through */ \
|
||||
case 9: write_debug(ptr[9], reg, 9); \
|
||||
/* Fall through */ \
|
||||
case 8: write_debug(ptr[8], reg, 8); \
|
||||
/* Fall through */ \
|
||||
case 7: write_debug(ptr[7], reg, 7); \
|
||||
/* Fall through */ \
|
||||
case 6: write_debug(ptr[6], reg, 6); \
|
||||
/* Fall through */ \
|
||||
case 5: write_debug(ptr[5], reg, 5); \
|
||||
/* Fall through */ \
|
||||
case 4: write_debug(ptr[4], reg, 4); \
|
||||
/* Fall through */ \
|
||||
case 3: write_debug(ptr[3], reg, 3); \
|
||||
/* Fall through */ \
|
||||
case 2: write_debug(ptr[2], reg, 2); \
|
||||
/* Fall through */ \
|
||||
case 1: write_debug(ptr[1], reg, 1); \
|
||||
/* Fall through */ \
|
||||
default: write_debug(ptr[0], reg, 0); \
|
||||
}
|
||||
|
||||
|
@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
|
||||
switch (spsr_idx) {
|
||||
case KVM_SPSR_SVC:
|
||||
write_sysreg_el1(v, SYS_SPSR);
|
||||
break;
|
||||
case KVM_SPSR_ABT:
|
||||
write_sysreg(v, spsr_abt);
|
||||
break;
|
||||
case KVM_SPSR_UND:
|
||||
write_sysreg(v, spsr_und);
|
||||
break;
|
||||
case KVM_SPSR_IRQ:
|
||||
write_sysreg(v, spsr_irq);
|
||||
break;
|
||||
case KVM_SPSR_FIQ:
|
||||
write_sysreg(v, spsr_fiq);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
||||
*/
|
||||
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
|
||||
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
|
||||
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
|
||||
__vcpu_sys_reg(vcpu, r->reg) = val;
|
||||
}
|
||||
|
||||
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
|
||||
@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
|
||||
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
|
||||
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
|
||||
trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
|
||||
trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
|
||||
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
|
||||
trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
|
||||
trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
|
||||
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
|
||||
trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
|
||||
trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
|
||||
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
|
||||
trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
|
||||
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
|
||||
|
||||
/* Macro to expand the PMEVCNTRn_EL0 register */
|
||||
#define PMU_PMEVCNTR_EL0(n) \
|
||||
@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
|
||||
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
|
||||
|
||||
{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
|
||||
{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
|
||||
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
|
||||
{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
|
||||
{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
|
||||
@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
|
||||
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *table, size_t num)
|
||||
const struct sys_reg_desc *table, size_t num,
|
||||
unsigned long *bmap)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < num; i++)
|
||||
if (table[i].reset)
|
||||
if (table[i].reset) {
|
||||
int reg = table[i].reg;
|
||||
|
||||
table[i].reset(vcpu, &table[i]);
|
||||
if (reg > 0 && reg < NR_SYS_REGS)
|
||||
set_bit(reg, bmap);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
size_t num;
|
||||
const struct sys_reg_desc *table;
|
||||
|
||||
/* Catch someone adding a register without putting in reset entry. */
|
||||
memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
|
||||
DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
|
||||
|
||||
/* Generic chip reset first (so target could override). */
|
||||
reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
|
||||
reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
|
||||
|
||||
table = get_target_table(vcpu->arch.target, true, &num);
|
||||
reset_sys_reg_descs(vcpu, table, num);
|
||||
reset_sys_reg_descs(vcpu, table, num, bmap);
|
||||
|
||||
for (num = 1; num < NR_SYS_REGS; num++) {
|
||||
if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
|
||||
if (WARN(!test_bit(num, bmap),
|
||||
"Didn't reset __vcpu_sys_reg(%zi)\n", num))
|
||||
break;
|
||||
}
|
||||
|
@ -14,9 +14,7 @@
|
||||
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
|
||||
return pgprot_writecombine(prot);
|
||||
return prot;
|
||||
return pgprot_writecombine(prot);
|
||||
}
|
||||
|
||||
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
|
||||
|
@ -150,16 +150,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kvm_arch_has_vcpu_debugfs(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_mips_free_vcpus(struct kvm *kvm)
|
||||
{
|
||||
unsigned int i;
|
||||
|
@ -18,4 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
|
||||
# other very old or stripped-down PA-RISC CPUs -- not currently supported
|
||||
|
||||
obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o
|
||||
CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough=3
|
||||
CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough
|
||||
|
@ -121,7 +121,6 @@ config PPC
|
||||
select ARCH_32BIT_OFF_T if PPC32
|
||||
select ARCH_HAS_DEBUG_VIRTUAL
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_DMA_MMAP_PGPROT
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_FORTIFY_SOURCE
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
|
@ -107,22 +107,22 @@ extern void _set_L3CR(unsigned long);
|
||||
|
||||
static inline void dcbz(void *addr)
|
||||
{
|
||||
__asm__ __volatile__ ("dcbz %y0" : : "Z"(*(u8 *)addr) : "memory");
|
||||
__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
|
||||
}
|
||||
|
||||
static inline void dcbi(void *addr)
|
||||
{
|
||||
__asm__ __volatile__ ("dcbi %y0" : : "Z"(*(u8 *)addr) : "memory");
|
||||
__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
|
||||
}
|
||||
|
||||
static inline void dcbf(void *addr)
|
||||
{
|
||||
__asm__ __volatile__ ("dcbf %y0" : : "Z"(*(u8 *)addr) : "memory");
|
||||
__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
|
||||
}
|
||||
|
||||
static inline void dcbst(void *addr)
|
||||
{
|
||||
__asm__ __volatile__ ("dcbst %y0" : : "Z"(*(u8 *)addr) : "memory");
|
||||
__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
|
||||
}
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
|
||||
signal.o sysfs.o cacheinfo.o time.o \
|
||||
prom.o traps.o setup-common.o \
|
||||
udbg.o misc.o io.o misc_$(BITS).o \
|
||||
of_platform.o prom_parse.o \
|
||||
dma-common.o
|
||||
of_platform.o prom_parse.o
|
||||
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
|
||||
signal_64.o ptrace32.o \
|
||||
paca.o nvram_64.o firmware.o
|
||||
|
@ -1,17 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Contains common dma routines for all powerpc platforms.
|
||||
*
|
||||
* Copyright (C) 2019 Shawn Anastasio.
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
|
||||
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if (!dev_is_dma_coherent(dev))
|
||||
return pgprot_noncached(prot);
|
||||
return prot;
|
||||
}
|
@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
|
||||
return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
|
||||
}
|
||||
|
||||
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_arch_vcpu_runnable(vcpu);
|
||||
}
|
||||
|
||||
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return false;
|
||||
@ -452,16 +457,6 @@ err_out:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bool kvm_arch_has_vcpu_debugfs(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_arch_destroy_vm(struct kvm *kvm)
|
||||
{
|
||||
unsigned int i;
|
||||
|
@ -54,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
|
||||
CONFIG_SERIAL_OF_PLATFORM=y
|
||||
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
|
||||
CONFIG_HVC_RISCV_SBI=y
|
||||
CONFIG_HW_RANDOM=y
|
||||
CONFIG_HW_RANDOM_VIRTIO=y
|
||||
CONFIG_SPI=y
|
||||
CONFIG_SPI_SIFIVE=y
|
||||
# CONFIG_PTP_1588_CLOCK is not set
|
||||
|
@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
|
||||
CONFIG_PCI_HOST_GENERIC=y
|
||||
CONFIG_PCIE_XILINX=y
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
CONFIG_BLK_DEV_LOOP=y
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
CONFIG_BLK_DEV_SD=y
|
||||
@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
|
||||
CONFIG_SERIAL_OF_PLATFORM=y
|
||||
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
|
||||
CONFIG_HVC_RISCV_SBI=y
|
||||
CONFIG_HW_RANDOM=y
|
||||
CONFIG_HW_RANDOM_VIRTIO=y
|
||||
# CONFIG_PTP_1588_CLOCK is not set
|
||||
CONFIG_DRM=y
|
||||
CONFIG_DRM_RADEON=y
|
||||
|
@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
|
||||
|
||||
static inline void __fstate_clean(struct pt_regs *regs)
|
||||
{
|
||||
regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
|
||||
regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
|
||||
}
|
||||
|
||||
static inline void fstate_off(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
|
||||
}
|
||||
|
||||
static inline void fstate_save(struct task_struct *task,
|
||||
|
@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
|
||||
}
|
||||
|
||||
#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
|
||||
#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
|
||||
|
||||
#define flush_tlb_range(vma, start, end) \
|
||||
remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
|
||||
#define flush_tlb_mm(mm) \
|
||||
|
||||
static inline void flush_tlb_page(struct vm_area_struct *vma,
|
||||
unsigned long addr)
|
||||
{
|
||||
flush_tlb_range(vma, addr, addr + PAGE_SIZE);
|
||||
}
|
||||
|
||||
#define flush_tlb_mm(mm) \
|
||||
remote_sfence_vma(mm_cpumask(mm), 0, -1)
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
|
||||
unsigned long sp)
|
||||
{
|
||||
regs->sstatus = SR_SPIE;
|
||||
if (has_fpu)
|
||||
if (has_fpu) {
|
||||
regs->sstatus |= SR_FS_INITIAL;
|
||||
/*
|
||||
* Restore the initial value to the FP register
|
||||
* before starting the user program.
|
||||
*/
|
||||
fstate_restore(current, regs);
|
||||
}
|
||||
regs->sepc = pc;
|
||||
regs->sp = sp;
|
||||
set_fs(USER_DS);
|
||||
@ -75,10 +81,11 @@ void flush_thread(void)
|
||||
{
|
||||
#ifdef CONFIG_FPU
|
||||
/*
|
||||
* Reset FPU context
|
||||
* Reset FPU state and context
|
||||
* frm: round to nearest, ties to even (IEEE default)
|
||||
* fflags: accrued exceptions cleared
|
||||
*/
|
||||
fstate_off(current, task_pt_regs(current));
|
||||
memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate));
|
||||
#endif
|
||||
}
|
||||
|
@ -5,5 +5,3 @@ lib-y += memset.o
|
||||
lib-y += uaccess.o
|
||||
|
||||
lib-$(CONFIG_64BIT) += tishift.o
|
||||
|
||||
lib-$(CONFIG_32BIT) += udivdi3.o
|
||||
|
@ -81,9 +81,13 @@ EXPORT_SYMBOL(__delay);
|
||||
void udelay(unsigned long usecs)
|
||||
{
|
||||
u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT;
|
||||
u64 n;
|
||||
|
||||
if (unlikely(usecs > MAX_UDELAY_US)) {
|
||||
__delay((u64)usecs * riscv_timebase / 1000000ULL);
|
||||
n = (u64)usecs * riscv_timebase;
|
||||
do_div(n, 1000000);
|
||||
|
||||
__delay(n);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1,32 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2016-2017 Free Software Foundation, Inc.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
ENTRY(__udivdi3)
|
||||
mv a2, a1
|
||||
mv a1, a0
|
||||
li a0, -1
|
||||
beqz a2, .L5
|
||||
li a3, 1
|
||||
bgeu a2, a1, .L2
|
||||
.L1:
|
||||
blez a2, .L2
|
||||
slli a2, a2, 1
|
||||
slli a3, a3, 1
|
||||
bgtu a1, a2, .L1
|
||||
.L2:
|
||||
li a0, 0
|
||||
.L3:
|
||||
bltu a1, a2, .L4
|
||||
sub a1, a1, a2
|
||||
or a0, a0, a3
|
||||
.L4:
|
||||
srli a3, a3, 1
|
||||
srli a2, a2, 1
|
||||
bnez a3, .L3
|
||||
.L5:
|
||||
ret
|
||||
ENDPROC(__udivdi3)
|
@ -48,9 +48,7 @@ void store_ipl_parmblock(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
uv_set_shared(__pa(&ipl_block));
|
||||
rc = __diag308(DIAG308_STORE, &ipl_block);
|
||||
uv_remove_shared(__pa(&ipl_block));
|
||||
if (rc == DIAG308_RC_OK &&
|
||||
ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
|
||||
ipl_block_valid = 1;
|
||||
|
@ -114,12 +114,8 @@ recursion_check:
|
||||
* If it comes up a second time then there's something wrong going on:
|
||||
* just break out and report an unknown stack type.
|
||||
*/
|
||||
if (*visit_mask & (1UL << info->type)) {
|
||||
printk_deferred_once(KERN_WARNING
|
||||
"WARNING: stack recursion on stack type %d\n",
|
||||
info->type);
|
||||
if (*visit_mask & (1UL << info->type))
|
||||
goto unknown;
|
||||
}
|
||||
*visit_mask |= 1UL << info->type;
|
||||
return 0;
|
||||
unknown:
|
||||
|
@ -60,12 +60,5 @@ ENTRY(startup_continue)
|
||||
|
||||
.align 16
|
||||
.LPG1:
|
||||
.Lpcmsk:.quad 0x0000000180000000
|
||||
.L4malign:.quad 0xffffffffffc00000
|
||||
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
|
||||
.Lnop: .long 0x07000700
|
||||
.Lparmaddr:
|
||||
.quad PARMAREA
|
||||
.align 64
|
||||
.Ldw: .quad 0x0002000180000000,0x0000000000000000
|
||||
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include <asm/os_info.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/boot_data.h>
|
||||
#include <asm/uv.h>
|
||||
#include "entry.h"
|
||||
|
||||
#define IPL_PARM_BLOCK_VERSION 0
|
||||
@ -892,21 +891,15 @@ static void __reipl_run(void *unused)
|
||||
{
|
||||
switch (reipl_type) {
|
||||
case IPL_TYPE_CCW:
|
||||
uv_set_shared(__pa(reipl_block_ccw));
|
||||
diag308(DIAG308_SET, reipl_block_ccw);
|
||||
uv_remove_shared(__pa(reipl_block_ccw));
|
||||
diag308(DIAG308_LOAD_CLEAR, NULL);
|
||||
break;
|
||||
case IPL_TYPE_FCP:
|
||||
uv_set_shared(__pa(reipl_block_fcp));
|
||||
diag308(DIAG308_SET, reipl_block_fcp);
|
||||
uv_remove_shared(__pa(reipl_block_fcp));
|
||||
diag308(DIAG308_LOAD_CLEAR, NULL);
|
||||
break;
|
||||
case IPL_TYPE_NSS:
|
||||
uv_set_shared(__pa(reipl_block_nss));
|
||||
diag308(DIAG308_SET, reipl_block_nss);
|
||||
uv_remove_shared(__pa(reipl_block_nss));
|
||||
diag308(DIAG308_LOAD_CLEAR, NULL);
|
||||
break;
|
||||
case IPL_TYPE_UNKNOWN:
|
||||
@ -1176,9 +1169,7 @@ static struct kset *dump_kset;
|
||||
|
||||
static void diag308_dump(void *dump_block)
|
||||
{
|
||||
uv_set_shared(__pa(dump_block));
|
||||
diag308(DIAG308_SET, dump_block);
|
||||
uv_remove_shared(__pa(dump_block));
|
||||
while (1) {
|
||||
if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
|
||||
break;
|
||||
|
@ -1114,8 +1114,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
ROOT_DEV = Root_RAM0;
|
||||
|
||||
/* Is init_mm really needed? */
|
||||
init_mm.start_code = PAGE_OFFSET;
|
||||
init_mm.start_code = (unsigned long) _text;
|
||||
init_mm.end_code = (unsigned long) _etext;
|
||||
init_mm.end_data = (unsigned long) _edata;
|
||||
init_mm.brk = (unsigned long) _end;
|
||||
|
@ -216,11 +216,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
||||
|
||||
if (!vdso_enabled)
|
||||
return 0;
|
||||
/*
|
||||
* Only map the vdso for dynamically linked elf binaries.
|
||||
*/
|
||||
if (!uses_interp)
|
||||
return 0;
|
||||
|
||||
vdso_pages = vdso64_pages;
|
||||
#ifdef CONFIG_COMPAT_VDSO
|
||||
|
@ -32,10 +32,9 @@ PHDRS {
|
||||
SECTIONS
|
||||
{
|
||||
. = 0x100000;
|
||||
_stext = .; /* Start of text section */
|
||||
.text : {
|
||||
/* Text and read-only data */
|
||||
_text = .;
|
||||
_stext = .; /* Start of text section */
|
||||
_text = .; /* Text and read-only data */
|
||||
HEAD_TEXT
|
||||
TEXT_TEXT
|
||||
SCHED_TEXT
|
||||
@ -47,11 +46,10 @@ SECTIONS
|
||||
*(.text.*_indirect_*)
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_etext = .; /* End of text section */
|
||||
} :text = 0x0700
|
||||
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_etext = .; /* End of text section */
|
||||
|
||||
NOTES :text :note
|
||||
|
||||
.dummy : { *(.dummy) } :data
|
||||
|
@ -2516,16 +2516,6 @@ out_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
bool kvm_arch_has_vcpu_debugfs(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
|
||||
|
@ -161,9 +161,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
#endif
for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
pmd = pmd_offset(pud, addr);
for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
st->current_address = addr;
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
prot = pmd_val(*pmd) &
@ -192,9 +192,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
}
#endif
for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
pud = pud_offset(p4d, addr);
for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
st->current_address = addr;
pud = pud_offset(p4d, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
prot = pud_val(*pud) &
@ -222,9 +222,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
}
#endif
for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
p4d = p4d_offset(pgd, addr);
for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
st->current_address = addr;
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d))
walk_pud_level(m, st, p4d, addr);
else
@ -11,8 +11,7 @@ chkbss: $(addprefix $(obj)/, $(chkbss-files))
quiet_cmd_chkbss = CHKBSS $<
cmd_chkbss = \
if $(OBJDUMP) -h $< | grep -q "\.bss" && \
! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
if ! $(OBJSIZE) --common $< | $(AWK) 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("dbr");
break;
case FD_REG_N:
if (0)
goto d_reg_n;
case F_REG_N:
printk("fr%d", rn);
break;
@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("xd%d", rn & ~1);
break;
}
d_reg_n:
/* else, fall through */
case D_REG_N:
printk("dr%d", rn);
break;
@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
printk("xd%d", rm & ~1);
break;
}
/* else, fall through */
case D_REG_M:
printk("dr%d", rm);
break;
@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
return diff;
}
/*
* Clang may lower `memcmp == 0` to `bcmp == 0`.
*/
int bcmp(const void *s1, const void *s2, size_t len)
{
return memcmp(s1, s2, len);
}
int strcmp(const char *str1, const char *str2)
{
const unsigned char *s1 = (const unsigned char *)str1;
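Aside, not part of the commit: the bcmp() stub above is needed because an
optimizing compiler may lower a memcmp() call whose result is only tested
against zero into a call to bcmp(), and a freestanding boot image that
provides only memcmp() can then fail to link. A minimal illustration of the
situation, with a hypothetical helper name:

/* Illustrative freestanding snippet, not taken from the kernel tree. */
#include <stddef.h>

int memcmp(const void *s1, const void *s2, size_t len);
int bcmp(const void *s1, const void *s2, size_t len);

/*
 * Only the zero/non-zero outcome is used, so the compiler is free to emit
 * a call to bcmp() here instead of memcmp(); both symbols must exist.
 */
int keys_equal(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len) == 0;
}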
@ -18,6 +18,20 @@
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
#define BOOT_PARAM_PRESERVE(struct_member) \
{ \
.start = offsetof(struct boot_params, struct_member), \
.len = sizeof_mbr(struct boot_params, struct_member), \
}
struct boot_params_to_save {
unsigned int start;
unsigned int len;
};
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
* problems again.
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
boot_params->acpi_rsdp_addr = 0;
memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
(char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
memset(&boot_params->_pad7[0], 0,
(char *)&boot_params->edd_mbr_sig_buffer[0] -
(char *)&boot_params->_pad7[0]);
memset(&boot_params->_pad8[0], 0,
(char *)&boot_params->eddbuf[0] -
(char *)&boot_params->_pad8[0]);
memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
static struct boot_params scratch;
char *bp_base = (char *)boot_params;
char *save_base = (char *)&scratch;
int i;
const struct boot_params_to_save to_save[] = {
BOOT_PARAM_PRESERVE(screen_info),
BOOT_PARAM_PRESERVE(apm_bios_info),
BOOT_PARAM_PRESERVE(tboot_addr),
BOOT_PARAM_PRESERVE(ist_info),
BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
BOOT_PARAM_PRESERVE(hd0_info),
BOOT_PARAM_PRESERVE(hd1_info),
BOOT_PARAM_PRESERVE(sys_desc_table),
BOOT_PARAM_PRESERVE(olpc_ofw_header),
BOOT_PARAM_PRESERVE(efi_info),
BOOT_PARAM_PRESERVE(alt_mem_k),
BOOT_PARAM_PRESERVE(scratch),
BOOT_PARAM_PRESERVE(e820_entries),
BOOT_PARAM_PRESERVE(eddbuf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
BOOT_PARAM_PRESERVE(e820_table),
BOOT_PARAM_PRESERVE(eddbuf),
};
memset(&scratch, 0, sizeof(scratch));
for (i = 0; i < ARRAY_SIZE(to_save); i++) {
memcpy(save_base + to_save[i].start,
bp_base + to_save[i].start, to_save[i].len);
}
memcpy(boot_params, save_base, sizeof(*boot_params));
}
}
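Aside, not part of the commit: the hunk above replaces the hand-written
memset() ranges in sanitize_boot_params() with a table of {offset, length}
descriptors built from offsetof() and a sizeof-on-member macro; the listed
members are copied into a zeroed scratch structure and the scratch is copied
back, so every field that is not listed ends up cleared. A rough sketch of
the same pattern on a made-up structure (all names hypothetical):

#include <stddef.h>
#include <string.h>

struct params {
	int keep_a;
	int junk;		/* not listed below, so it gets zeroed */
	long keep_b;
};

struct field { size_t start, len; };

#define PRESERVE(member) \
	{ offsetof(struct params, member), sizeof(((struct params *)0)->member) }

static void sanitize(struct params *p)
{
	static const struct field to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
	struct params scratch;
	size_t i;

	memset(&scratch, 0, sizeof(scratch));
	for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
		memcpy((char *)&scratch + to_save[i].start,
		       (char *)p + to_save[i].start, to_save[i].len);
	*p = scratch;		/* fields not in to_save[] are now zero */
}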
@ -35,6 +35,8 @@
|
||||
#include <asm/kvm_vcpu_regs.h>
|
||||
#include <asm/hyperv-tlfs.h>
|
||||
|
||||
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
|
||||
|
||||
#define KVM_MAX_VCPUS 288
|
||||
#define KVM_SOFT_MAX_VCPUS 240
|
||||
#define KVM_MAX_VCPU_ID 1023
|
||||
@ -1175,6 +1177,7 @@ struct kvm_x86_ops {
|
||||
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
|
||||
uint32_t guest_irq, bool set);
|
||||
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
|
||||
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
|
||||
|
||||
int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
|
||||
bool *expired);
|
||||
|
@ -184,7 +184,8 @@ void __init default_setup_apic_routing(void)
|
||||
def_to_bigsmp = 0;
|
||||
break;
|
||||
}
|
||||
/* If P4 and above fall through */
|
||||
/* P4 and above */
|
||||
/* fall through */
|
||||
case X86_VENDOR_HYGON:
|
||||
case X86_VENDOR_AMD:
|
||||
def_to_bigsmp = 1;
|
||||
|
@ -98,6 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
|
||||
case 7:
|
||||
if (size < 0x40)
|
||||
break;
|
||||
/* Else, fall through */
|
||||
case 6:
|
||||
case 5:
|
||||
case 4:
|
||||
|
@ -17,6 +17,12 @@
|
||||
*/
|
||||
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
|
||||
|
||||
/*
|
||||
* Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
|
||||
* hardware or BIOS before kernel boot.
|
||||
*/
|
||||
static u32 orig_umwait_control_cached __ro_after_init;
|
||||
|
||||
/*
|
||||
* Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
|
||||
* the sysfs write functions.
|
||||
@ -52,6 +58,23 @@ static int umwait_cpu_online(unsigned int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The CPU hotplug callback sets the control MSR to the original control
|
||||
* value.
|
||||
*/
|
||||
static int umwait_cpu_offline(unsigned int cpu)
|
||||
{
|
||||
/*
|
||||
* This code is protected by the CPU hotplug already and
|
||||
* orig_umwait_control_cached is never changed after it caches
|
||||
* the original control MSR value in umwait_init(). So there
|
||||
* is no race condition here.
|
||||
*/
|
||||
wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
|
||||
* is the only active CPU at this time. The MSR is set up on the APs via the
|
||||
@ -185,8 +208,22 @@ static int __init umwait_init(void)
|
||||
if (!boot_cpu_has(X86_FEATURE_WAITPKG))
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* Cache the original control MSR value before the control MSR is
|
||||
* changed. This is the only place where orig_umwait_control_cached
|
||||
* is modified.
|
||||
*/
|
||||
rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
|
||||
|
||||
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
|
||||
umwait_cpu_online, NULL);
|
||||
umwait_cpu_online, umwait_cpu_offline);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* On failure, the control MSR on all CPUs has the
|
||||
* original control value.
|
||||
*/
|
||||
return ret;
|
||||
}
|
||||
|
||||
register_syscore_ops(&umwait_syscore_ops);
|
||||
|
||||
|
@ -308,9 +308,6 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
|
||||
|
||||
static void kvm_guest_cpu_init(void)
|
||||
{
|
||||
if (!kvm_para_available())
|
||||
return;
|
||||
|
||||
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
|
||||
u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
|
||||
|
||||
@ -625,9 +622,6 @@ static void __init kvm_guest_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!kvm_para_available())
|
||||
return;
|
||||
|
||||
paravirt_ops_setup();
|
||||
register_reboot_notifier(&kvm_pv_reboot_nb);
|
||||
for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
|
||||
@ -848,8 +842,6 @@ asm(
|
||||
*/
|
||||
void __init kvm_spinlock_init(void)
|
||||
{
|
||||
if (!kvm_para_available())
|
||||
return;
|
||||
/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
|
||||
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
|
||||
return;
|
||||
|
@ -201,6 +201,7 @@ static int set_segment_reg(struct task_struct *task,
|
||||
case offsetof(struct user_regs_struct, ss):
|
||||
if (unlikely(value == 0))
|
||||
return -EIO;
|
||||
/* Else, fall through */
|
||||
|
||||
default:
|
||||
*pt_regs_access(task_pt_regs(task), offset) = value;
|
||||
|
@ -8,11 +8,6 @@
|
||||
#include <linux/debugfs.h>
|
||||
#include "lapic.h"
|
||||
|
||||
bool kvm_arch_has_vcpu_debugfs(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static int vcpu_get_timer_advance_ns(void *data, u64 *val)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
|
||||
@ -48,37 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
|
||||
|
||||
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
|
||||
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct dentry *ret;
|
||||
debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
|
||||
&vcpu_tsc_offset_fops);
|
||||
|
||||
ret = debugfs_create_file("tsc-offset", 0444,
|
||||
vcpu->debugfs_dentry,
|
||||
vcpu, &vcpu_tsc_offset_fops);
|
||||
if (!ret)
|
||||
return -ENOMEM;
|
||||
|
||||
if (lapic_in_kernel(vcpu)) {
|
||||
ret = debugfs_create_file("lapic_timer_advance_ns", 0444,
|
||||
vcpu->debugfs_dentry,
|
||||
vcpu, &vcpu_timer_advance_ns_fops);
|
||||
if (!ret)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (lapic_in_kernel(vcpu))
|
||||
debugfs_create_file("lapic_timer_advance_ns", 0444,
|
||||
vcpu->debugfs_dentry, vcpu,
|
||||
&vcpu_timer_advance_ns_fops);
|
||||
|
||||
if (kvm_has_tsc_control) {
|
||||
ret = debugfs_create_file("tsc-scaling-ratio", 0444,
|
||||
vcpu->debugfs_dentry,
|
||||
vcpu, &vcpu_tsc_scaling_fops);
|
||||
if (!ret)
|
||||
return -ENOMEM;
|
||||
ret = debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
|
||||
vcpu->debugfs_dentry,
|
||||
vcpu, &vcpu_tsc_scaling_frac_fops);
|
||||
if (!ret)
|
||||
return -ENOMEM;
|
||||
|
||||
debugfs_create_file("tsc-scaling-ratio", 0444,
|
||||
vcpu->debugfs_dentry, vcpu,
|
||||
&vcpu_tsc_scaling_fops);
|
||||
debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
|
||||
vcpu->debugfs_dentry, vcpu,
|
||||
&vcpu_tsc_scaling_frac_fops);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1548,7 +1548,6 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
|
||||
static void apic_timer_expired(struct kvm_lapic *apic)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = apic->vcpu;
|
||||
struct swait_queue_head *q = &vcpu->wq;
|
||||
struct kvm_timer *ktimer = &apic->lapic_timer;
|
||||
|
||||
if (atomic_read(&apic->lapic_timer.pending))
|
||||
@ -1566,13 +1565,6 @@ static void apic_timer_expired(struct kvm_lapic *apic)
|
||||
|
||||
atomic_inc(&apic->lapic_timer.pending);
|
||||
kvm_set_pending_timer(vcpu);
|
||||
|
||||
/*
|
||||
* For x86, the atomic_inc() is serialized, thus
|
||||
* using swait_active() is safe.
|
||||
*/
|
||||
if (swait_active(q))
|
||||
swake_up_one(q);
|
||||
}
|
||||
|
||||
static void start_sw_tscdeadline(struct kvm_lapic *apic)
|
||||
|
@ -5190,6 +5190,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
|
||||
kvm_vcpu_wake_up(vcpu);
|
||||
}
|
||||
|
||||
static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -7314,6 +7319,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
||||
|
||||
.pmu_ops = &amd_pmu_ops,
|
||||
.deliver_posted_interrupt = svm_deliver_avic_intr,
|
||||
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
|
||||
.update_pi_irte = svm_update_pi_irte,
|
||||
.setup_mce = svm_setup_mce,
|
||||
|
||||
|
@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
||||
return max_irr;
|
||||
}
|
||||
|
||||
static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return pi_test_on(vcpu_to_pi_desc(vcpu));
|
||||
}
|
||||
|
||||
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
|
||||
{
|
||||
if (!kvm_vcpu_apicv_active(vcpu))
|
||||
@ -7726,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
||||
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
|
||||
.sync_pir_to_irr = vmx_sync_pir_to_irr,
|
||||
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
|
||||
.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
|
||||
|
||||
.set_tss_addr = vmx_set_tss_addr,
|
||||
.set_identity_map_addr = vmx_set_identity_map_addr,
|
||||
|
@ -9698,6 +9698,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
|
||||
return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
|
||||
}
|
||||
|
||||
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
|
||||
return true;
|
||||
|
||||
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
|
||||
kvm_test_request(KVM_REQ_SMI, vcpu) ||
|
||||
kvm_test_request(KVM_REQ_EVENT, vcpu))
|
||||
return true;
|
||||
|
||||
if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.preempted_in_kernel;
|
||||
|
@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <linux/types.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/cpu.h>
|
||||
|
||||
unsigned int x86_family(unsigned int sig)
|
||||
{
|
||||
|
@ -178,13 +178,15 @@ void FPU_printall(void)
|
||||
for (i = 0; i < 8; i++) {
|
||||
FPU_REG *r = &st(i);
|
||||
u_char tagi = FPU_gettagi(i);
|
||||
|
||||
switch (tagi) {
|
||||
case TAG_Empty:
|
||||
continue;
|
||||
break;
|
||||
case TAG_Zero:
|
||||
case TAG_Special:
|
||||
/* Update tagi for the printk below */
|
||||
tagi = FPU_Special(r);
|
||||
/* fall through */
|
||||
case TAG_Valid:
|
||||
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
|
||||
getsign(r) ? '-' : '+',
|
||||
@ -198,7 +200,6 @@ void FPU_printall(void)
|
||||
printk("Whoops! Error in errors.c: tag%d is %d ", i,
|
||||
tagi);
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
printk("%s\n", tag_desc[(int)(unsigned)tagi]);
|
||||
}
|
||||
|
@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
|
||||
case TW_Denormal:
|
||||
if (denormal_operand() < 0)
|
||||
return;
|
||||
|
||||
/* fall through */
|
||||
case TAG_Zero:
|
||||
case TAG_Valid:
|
||||
setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
|
||||
|
@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
emit_prologue(&prog, bpf_prog->aux->stack_depth,
bpf_prog_was_classic(bpf_prog));
addrs[0] = prog - temp;
for (i = 0; i < insn_cnt; i++, insn++) {
for (i = 1; i <= insn_cnt; i++, insn++) {
const s32 imm32 = insn->imm;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
extra_pass = true;
goto skip_init_addrs;
}
addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* Before first pass, make a rough estimation of addrs[]
* each BPF instruction is translated to less than 64 bytes
*/
for (proglen = 0, i = 0; i < prog->len; i++) {
for (proglen = 0, i = 0; i <= prog->len; i++) {
proglen += 64;
addrs[i] = proglen;
}
@ -1180,7 +1181,7 @@ out_image:
if (!image || !prog->is_func || extra_pass) {
if (image)
bpf_prog_fill_jited_linfo(prog, addrs);
bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
kfree(addrs);
kfree(jit_data);
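Aside, not part of the commit: the JIT change above shifts the addrs[] table
by one slot, which is why it is now allocated with prog->len + 1 entries, the
emit loop runs from 1 to insn_cnt, and bpf_prog_fill_jited_linfo() is handed
addrs + 1. Roughly, addrs[0] records the image offset just past the prologue
and addrs[i] the offset just past the i-th instruction, so an instruction's
start and length can be derived as sketched below (helper names made up):

/* Illustrative only -- assumes the indexing convention described above. */

/* Image offset where the code for 1-based instruction i begins. */
static unsigned int insn_start(const int *addrs, int i)
{
	return addrs[i - 1];
}

/* Number of bytes emitted for 1-based instruction i. */
static unsigned int insn_len(const int *addrs, int i)
{
	return addrs[i] - addrs[i - 1];
}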
@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
|
||||
targets += $(purgatory-y)
|
||||
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
|
||||
|
||||
$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
|
||||
$(call if_changed_rule,cc_o_c)
|
||||
|
||||
$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
|
||||
$(call if_changed_rule,cc_o_c)
|
||||
|
||||
@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
|
||||
|
||||
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
|
||||
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
|
||||
# sure how to relocate those. Like kexec-tools, use custom flags.
|
||||
# sure how to relocate those.
|
||||
ifdef CONFIG_FUNCTION_TRACER
|
||||
CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
|
||||
endif
|
||||
|
||||
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
|
||||
KBUILD_CFLAGS += -m$(BITS)
|
||||
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
|
||||
ifdef CONFIG_STACKPROTECTOR
|
||||
CFLAGS_REMOVE_sha256.o += -fstack-protector
|
||||
CFLAGS_REMOVE_purgatory.o += -fstack-protector
|
||||
CFLAGS_REMOVE_string.o += -fstack-protector
|
||||
CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
|
||||
endif
|
||||
|
||||
ifdef CONFIG_STACKPROTECTOR_STRONG
|
||||
CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
|
||||
CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
|
||||
CFLAGS_REMOVE_string.o += -fstack-protector-strong
|
||||
CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
|
||||
endif
|
||||
|
||||
ifdef CONFIG_RETPOLINE
|
||||
CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
|
||||
CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
|
||||
CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
|
||||
CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
|
||||
endif
|
||||
|
||||
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
|
||||
$(call if_changed,ld)
|
||||
|
@ -68,3 +68,9 @@ void purgatory(void)
|
||||
}
|
||||
copy_backup_region();
|
||||
}
|
||||
|
||||
/*
|
||||
* Defined in order to reuse memcpy() and memset() from
|
||||
* arch/x86/boot/compressed/string.c
|
||||
*/
|
||||
void warn(const char *msg) {}
|
||||
|
@ -1,23 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Simple string functions.
|
||||
*
|
||||
* Copyright (C) 2014 Red Hat Inc.
|
||||
*
|
||||
* Author:
|
||||
* Vivek Goyal <vgoyal@redhat.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "../boot/string.c"
|
||||
|
||||
void *memcpy(void *dst, const void *src, size_t len)
|
||||
{
|
||||
return __builtin_memcpy(dst, src, len);
|
||||
}
|
||||
|
||||
void *memset(void *dst, int c, size_t len)
|
||||
{
|
||||
return __builtin_memset(dst, c, len);
|
||||
}
|
@ -511,6 +511,7 @@ void cpu_reset(void)
|
||||
"add %2, %2, %7\n\t"
|
||||
"addi %0, %0, -1\n\t"
|
||||
"bnez %0, 1b\n\t"
|
||||
"isync\n\t"
|
||||
/* Jump to identity mapping */
|
||||
"jx %3\n"
|
||||
"2:\n\t"
|
||||
|
@ -1924,12 +1924,13 @@ static void bfq_add_request(struct request *rq)
|
||||
* confirmed no later than during the next
|
||||
* I/O-plugging interval for bfqq.
|
||||
*/
|
||||
if (!bfq_bfqq_has_short_ttime(bfqq) &&
|
||||
if (bfqd->last_completed_rq_bfqq &&
|
||||
!bfq_bfqq_has_short_ttime(bfqq) &&
|
||||
ktime_get_ns() - bfqd->last_completion <
|
||||
200 * NSEC_PER_USEC) {
|
||||
if (bfqd->last_completed_rq_bfqq != bfqq &&
|
||||
bfqd->last_completed_rq_bfqq !=
|
||||
bfqq->waker_bfqq) {
|
||||
bfqd->last_completed_rq_bfqq !=
|
||||
bfqq->waker_bfqq) {
|
||||
/*
|
||||
* First synchronization detected with
|
||||
* a candidate waker queue, or with a
|
||||
@ -2250,9 +2251,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
|
||||
blk_rq_pos(container_of(rb_prev(&req->rb_node),
|
||||
struct request, rb_node))) {
|
||||
struct bfq_queue *bfqq = bfq_init_rq(req);
|
||||
struct bfq_data *bfqd = bfqq->bfqd;
|
||||
struct bfq_data *bfqd;
|
||||
struct request *prev, *next_rq;
|
||||
|
||||
if (!bfqq)
|
||||
return;
|
||||
|
||||
bfqd = bfqq->bfqd;
|
||||
|
||||
/* Reposition request in its sort_list */
|
||||
elv_rb_del(&bfqq->sort_list, req);
|
||||
elv_rb_add(&bfqq->sort_list, req);
|
||||
@ -2299,6 +2305,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
|
||||
struct bfq_queue *bfqq = bfq_init_rq(rq),
|
||||
*next_bfqq = bfq_init_rq(next);
|
||||
|
||||
if (!bfqq)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If next and rq belong to the same bfq_queue and next is older
|
||||
* than rq, then reposition rq in the fifo (by substituting next
|
||||
@ -4764,6 +4773,8 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
|
||||
*/
|
||||
void bfq_put_queue(struct bfq_queue *bfqq)
|
||||
{
|
||||
struct bfq_queue *item;
|
||||
struct hlist_node *n;
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
struct bfq_group *bfqg = bfqq_group(bfqq);
|
||||
#endif
|
||||
@ -4808,6 +4819,36 @@ void bfq_put_queue(struct bfq_queue *bfqq)
|
||||
bfqq->bfqd->burst_size--;
|
||||
}
|
||||
|
||||
/*
|
||||
* bfqq does not exist any longer, so it cannot be woken by
|
||||
* any other queue, and cannot wake any other queue. Then bfqq
|
||||
* must be removed from the woken list of its possible waker
|
||||
* queue, and all queues in the woken list of bfqq must stop
|
||||
* having a waker queue. Strictly speaking, these updates
|
||||
* should be performed when bfqq remains with no I/O source
|
||||
* attached to it, which happens before bfqq gets freed. In
|
||||
* particular, this happens when the last process associated
|
||||
* with bfqq exits or gets associated with a different
|
||||
* queue. However, both events lead to bfqq being freed soon,
|
||||
* and dangling references would come out only after bfqq gets
|
||||
* freed. So these updates are done here, as a simple and safe
|
||||
* way to handle all cases.
|
||||
*/
|
||||
/* remove bfqq from woken list */
|
||||
if (!hlist_unhashed(&bfqq->woken_list_node))
|
||||
hlist_del_init(&bfqq->woken_list_node);
|
||||
|
||||
/* reset waker for all queues in woken list */
|
||||
hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
|
||||
woken_list_node) {
|
||||
item->waker_bfqq = NULL;
|
||||
bfq_clear_bfqq_has_waker(item);
|
||||
hlist_del_init(&item->woken_list_node);
|
||||
}
|
||||
|
||||
if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
|
||||
bfqq->bfqd->last_completed_rq_bfqq = NULL;
|
||||
|
||||
kmem_cache_free(bfq_pool, bfqq);
|
||||
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|
||||
bfqg_and_blkg_put(bfqg);
|
||||
@ -4835,9 +4876,6 @@ static void bfq_put_cooperator(struct bfq_queue *bfqq)
|
||||
|
||||
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
|
||||
{
|
||||
struct bfq_queue *item;
|
||||
struct hlist_node *n;
|
||||
|
||||
if (bfqq == bfqd->in_service_queue) {
|
||||
__bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
|
||||
bfq_schedule_dispatch(bfqd);
|
||||
@ -4847,18 +4885,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
|
||||
|
||||
bfq_put_cooperator(bfqq);
|
||||
|
||||
/* remove bfqq from woken list */
|
||||
if (!hlist_unhashed(&bfqq->woken_list_node))
|
||||
hlist_del_init(&bfqq->woken_list_node);
|
||||
|
||||
/* reset waker for all queues in woken list */
|
||||
hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
|
||||
woken_list_node) {
|
||||
item->waker_bfqq = NULL;
|
||||
bfq_clear_bfqq_has_waker(item);
|
||||
hlist_del_init(&item->woken_list_node);
|
||||
}
|
||||
|
||||
bfq_put_queue(bfqq); /* release process reference */
|
||||
}
|
||||
|
||||
@ -5436,12 +5462,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|
||||
|
||||
spin_lock_irq(&bfqd->lock);
|
||||
bfqq = bfq_init_rq(rq);
|
||||
if (at_head || blk_rq_is_passthrough(rq)) {
|
||||
if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
|
||||
if (at_head)
|
||||
list_add(&rq->queuelist, &bfqd->dispatch);
|
||||
else
|
||||
list_add_tail(&rq->queuelist, &bfqd->dispatch);
|
||||
} else { /* bfqq is assumed to be non null here */
|
||||
} else {
|
||||
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
|
||||
/*
|
||||
* Update bfqq, because, if a queue merge has occurred
|
||||
|
@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
|
||||
rq = blk_mq_get_request(q, bio, &data);
|
||||
if (unlikely(!rq)) {
|
||||
rq_qos_cleanup(q, bio);
|
||||
|
||||
cookie = BLK_QC_T_NONE;
|
||||
if (bio->bi_opf & REQ_NOWAIT_INLINE)
|
||||
cookie = BLK_QC_T_EAGAIN;
|
||||
else if (bio->bi_opf & REQ_NOWAIT)
|
||||
if (bio->bi_opf & REQ_NOWAIT)
|
||||
bio_wouldblock_error(bio);
|
||||
return cookie;
|
||||
return BLK_QC_T_NONE;
|
||||
}
|
||||
|
||||
trace_block_getrq(q, bio, bio->bi_opf);
|
||||
@ -2666,8 +2662,6 @@ void blk_mq_release(struct request_queue *q)
|
||||
struct blk_mq_hw_ctx *hctx, *next;
|
||||
int i;
|
||||
|
||||
cancel_delayed_work_sync(&q->requeue_work);
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
|
||||
|
||||
|
@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
|
||||
|
||||
blk_free_queue_stats(q->stats);
|
||||
|
||||
if (queue_is_mq(q))
|
||||
cancel_delayed_work_sync(&q->requeue_work);
|
||||
|
||||
blk_exit_queue(q);
|
||||
|
||||
blk_queue_free_zone_bitmaps(q);
|
||||
|
@ -1786,6 +1786,21 @@ nothing_to_do:
|
||||
return 1;
|
||||
}
|
||||
|
||||
static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
|
||||
{
|
||||
struct request *rq = scmd->request;
|
||||
u32 req_blocks;
|
||||
|
||||
if (!blk_rq_is_passthrough(rq))
|
||||
return true;
|
||||
|
||||
req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
|
||||
if (n_blocks > req_blocks)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
|
||||
* @qc: Storage for translated ATA taskfile
|
||||
@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
|
||||
scsi_10_lba_len(cdb, &block, &n_block);
|
||||
if (cdb[1] & (1 << 3))
|
||||
tf_flags |= ATA_TFLAG_FUA;
|
||||
if (!ata_check_nblocks(scmd, n_block))
|
||||
goto invalid_fld;
|
||||
break;
|
||||
case READ_6:
|
||||
case WRITE_6:
|
||||
@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
|
||||
*/
|
||||
if (!n_block)
|
||||
n_block = 256;
|
||||
if (!ata_check_nblocks(scmd, n_block))
|
||||
goto invalid_fld;
|
||||
break;
|
||||
case READ_16:
|
||||
case WRITE_16:
|
||||
@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
|
||||
scsi_16_lba_len(cdb, &block, &n_block);
|
||||
if (cdb[1] & (1 << 3))
|
||||
tf_flags |= ATA_TFLAG_FUA;
|
||||
if (!ata_check_nblocks(scmd, n_block))
|
||||
goto invalid_fld;
|
||||
break;
|
||||
default:
|
||||
DPRINTK("no-byte command\n");
|
||||
|
@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
||||
unsigned int offset;
|
||||
unsigned char *buf;
|
||||
|
||||
if (!qc->cursg) {
|
||||
qc->curbytes = qc->nbytes;
|
||||
return;
|
||||
}
|
||||
if (qc->curbytes == qc->nbytes - qc->sect_size)
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
|
||||
@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
|
||||
|
||||
if (qc->cursg_ofs == qc->cursg->length) {
|
||||
qc->cursg = sg_next(qc->cursg);
|
||||
if (!qc->cursg)
|
||||
ap->hsm_task_state = HSM_ST_LAST;
|
||||
qc->cursg_ofs = 0;
|
||||
}
|
||||
}
|
||||
|
@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
|
||||
static int rb532_pata_driver_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ata_host *ah = platform_get_drvdata(pdev);
|
||||
struct rb532_cf_info *info = ah->private_data;
|
||||
|
||||
ata_host_detach(ah);
|
||||
|
||||
|
@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
|
||||
choice
|
||||
prompt "Backlight initial state"
|
||||
default CHARLCD_BL_FLASH
|
||||
---help---
|
||||
Select the initial backlight state on boot or module load.
|
||||
|
||||
Previously, there was no option for this: the backlight flashed
|
||||
briefly on init. Now you can also turn it off/on.
|
||||
|
||||
config CHARLCD_BL_OFF
|
||||
bool "Off"
|
||||
|
@ -20,7 +20,7 @@
|
||||
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
#include <misc/charlcd.h>
|
||||
#include "charlcd.h"
|
||||
|
||||
#define LCD_MINOR 156
|
||||
|
||||
|
@ -6,6 +6,9 @@
|
||||
* Copyright (C) 2016-2017 Glider bvba
|
||||
*/
|
||||
|
||||
#ifndef _CHARLCD_H
|
||||
#define _CHARLCD_H
|
||||
|
||||
struct charlcd {
|
||||
const struct charlcd_ops *ops;
|
||||
const unsigned char *char_conv; /* Optional */
|
||||
@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
|
||||
int charlcd_unregister(struct charlcd *lcd);
|
||||
|
||||
void charlcd_poke(struct charlcd *lcd);
|
||||
|
||||
#endif /* CHARLCD_H */
|
@ -14,8 +14,7 @@
|
||||
#include <linux/property.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <misc/charlcd.h>
|
||||
|
||||
#include "charlcd.h"
|
||||
|
||||
enum hd44780_pin {
|
||||
/* Order does matter due to writing to GPIO array subsets! */
|
||||
|
@ -55,7 +55,7 @@
|
||||
#include <linux/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <misc/charlcd.h>
|
||||
#include "charlcd.h"
|
||||
|
||||
#define KEYPAD_MINOR 185
|
||||
|
||||
@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
|
||||
return;
|
||||
|
||||
err_lcd_unreg:
|
||||
if (scan_timer.function)
|
||||
del_timer_sync(&scan_timer);
|
||||
if (lcd.enabled)
|
||||
charlcd_unregister(lcd.charlcd);
|
||||
err_unreg_device:
|
||||
|
@ -1823,12 +1823,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
|
||||
*/
|
||||
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
|
||||
{
|
||||
unsigned int ref;
|
||||
|
||||
/* see if we live in a "glue" directory */
|
||||
if (!live_in_glue_dir(glue_dir, dev))
|
||||
return;
|
||||
|
||||
mutex_lock(&gdp_mutex);
|
||||
if (!kobject_has_children(glue_dir))
|
||||
/**
|
||||
* There is a race condition between removing glue directory
|
||||
* and adding a new device under the glue directory.
|
||||
*
|
||||
* CPU1: CPU2:
|
||||
*
|
||||
* device_add()
|
||||
* get_device_parent()
|
||||
* class_dir_create_and_add()
|
||||
* kobject_add_internal()
|
||||
* create_dir() // create glue_dir
|
||||
*
|
||||
* device_add()
|
||||
* get_device_parent()
|
||||
* kobject_get() // get glue_dir
|
||||
*
|
||||
* device_del()
|
||||
* cleanup_glue_dir()
|
||||
* kobject_del(glue_dir)
|
||||
*
|
||||
* kobject_add()
|
||||
* kobject_add_internal()
|
||||
* create_dir() // in glue_dir
|
||||
* sysfs_create_dir_ns()
|
||||
* kernfs_create_dir_ns(sd)
|
||||
*
|
||||
* sysfs_remove_dir() // glue_dir->sd=NULL
|
||||
* sysfs_put() // free glue_dir->sd
|
||||
*
|
||||
* // sd is freed
|
||||
* kernfs_new_node(sd)
|
||||
* kernfs_get(glue_dir)
|
||||
* kernfs_add_one()
|
||||
* kernfs_put()
|
||||
*
|
||||
* Before CPU1 remove last child device under glue dir, if CPU2 add
|
||||
* a new device under glue dir, the glue_dir kobject reference count
|
||||
* will be increase to 2 in kobject_get(k). And CPU2 has been called
|
||||
* kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
|
||||
* and sysfs_put(). This result in glue_dir->sd is freed.
|
||||
*
|
||||
* Then the CPU2 will see a stale "empty" but still potentially used
|
||||
* glue dir around in kernfs_new_node().
|
||||
*
|
||||
* In order to avoid this happening, we also should make sure that
|
||||
* kernfs_node for glue_dir is released in CPU1 only when refcount
|
||||
* for glue_dir kobj is 1.
|
||||
*/
|
||||
ref = kref_read(&glue_dir->kref);
|
||||
if (!kobject_has_children(glue_dir) && !--ref)
|
||||
kobject_del(glue_dir);
|
||||
kobject_put(glue_dir);
|
||||
mutex_unlock(&gdp_mutex);
|
||||
|
@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
if (num == 0 && has_acpi_companion(&dev->dev))
return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
if (num == 0 && has_acpi_companion(&dev->dev)) {
int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
}
return -ENXIO;
#endif
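Aside, not part of the commit: the fix above keeps the platform_get_irq()
behaviour its callers rely on for the ACPI GPIO-interrupt fallback, so a
missing interrupt still surfaces as -ENXIO while -EPROBE_DEFER continues to
propagate and the probe can be retried later. A typical caller, sketched with
a hypothetical driver name:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* -EPROBE_DEFER and -ENXIO both propagate */

	/* ... request the IRQ and finish probing here ... */
	return 0;
}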
@ -44,7 +44,7 @@ config REGMAP_IRQ
|
||||
|
||||
config REGMAP_SOUNDWIRE
|
||||
tristate
|
||||
depends on SOUNDWIRE_BUS
|
||||
depends on SOUNDWIRE
|
||||
|
||||
config REGMAP_SCCB
|
||||
tristate
|
||||
|
@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
|
||||
}
|
||||
|
||||
flush_scheduled_work();
|
||||
/* pass one: without sleeping, do aoedev_downdev */
|
||||
/* pass one: do aoedev_downdev, which might sleep */
|
||||
restart1:
|
||||
spin_lock_irqsave(&devlist_lock, flags);
|
||||
for (d = devlist; d; d = d->next) {
|
||||
spin_lock(&d->lock);
|
||||
if (d->flags & DEVFL_TKILL)
|
||||
goto cont;
|
||||
|
||||
if (exiting) {
|
||||
/* unconditionally take each device down */
|
||||
} else if (specified) {
|
||||
@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
|
||||
|| d->ref)
|
||||
goto cont;
|
||||
|
||||
spin_unlock(&d->lock);
|
||||
spin_unlock_irqrestore(&devlist_lock, flags);
|
||||
aoedev_downdev(d);
|
||||
d->flags |= DEVFL_TKILL;
|
||||
goto restart1;
|
||||
cont:
|
||||
spin_unlock(&d->lock);
|
||||
}
|
||||
@ -348,7 +355,7 @@ cont:
|
||||
/* pass two: call freedev, which might sleep,
|
||||
* for aoedevs marked with DEVFL_TKILL
|
||||
*/
|
||||
restart:
|
||||
restart2:
|
||||
spin_lock_irqsave(&devlist_lock, flags);
|
||||
for (d = devlist; d; d = d->next) {
|
||||
spin_lock(&d->lock);
|
||||
@ -357,7 +364,7 @@ restart:
|
||||
spin_unlock(&d->lock);
|
||||
spin_unlock_irqrestore(&devlist_lock, flags);
|
||||
freedev(d);
|
||||
goto restart;
|
||||
goto restart2;
|
||||
}
|
||||
spin_unlock(&d->lock);
|
||||
}
|
||||
|
@ -885,7 +885,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
|
||||
|
||||
static int loop_kthread_worker_fn(void *worker_ptr)
|
||||
{
|
||||
current->flags |= PF_LESS_THROTTLE;
|
||||
current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
|
||||
return kthread_worker_fn(worker_ptr);
|
||||
}
|
||||
|
||||
|
@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
|
||||
}
|
||||
}
|
||||
|
||||
err = -ENOMEM;
|
||||
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
|
||||
req = kzalloc(sizeof(*req), GFP_KERNEL);
|
||||
if (!req)
|
||||
@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
|
||||
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
|
||||
if (err) {
|
||||
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
|
||||
return err;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1007,8 +1008,7 @@ fail:
|
||||
}
|
||||
kfree(req);
|
||||
}
|
||||
return -ENOMEM;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int connect_ring(struct backend_info *be)
|
||||
|
@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int err;
|
||||
|
||||
bt_dev_dbg(hdev, "QCA pre shutdown cmd");
|
||||
|
||||
skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
|
||||
NULL, HCI_INIT_TIMEOUT);
|
||||
if (IS_ERR(skb)) {
|
||||
err = PTR_ERR(skb);
|
||||
bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
kfree_skb(skb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
|
||||
|
||||
static void qca_tlv_check_data(struct rome_config *config,
|
||||
const struct firmware *fw)
|
||||
{
|
||||
@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
|
||||
BT_DBG("Length\t\t : %d bytes", length);
|
||||
|
||||
config->dnld_mode = ROME_SKIP_EVT_NONE;
|
||||
config->dnld_type = ROME_SKIP_EVT_NONE;
|
||||
|
||||
switch (config->type) {
|
||||
case TLV_TYPE_PATCH:
|
||||
@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
|
||||
|
||||
evt = skb_put(skb, sizeof(*evt));
|
||||
evt->ncmd = 1;
|
||||
evt->opcode = QCA_HCI_CC_OPCODE;
|
||||
evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
|
||||
|
||||
skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
|
||||
|
||||
@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
|
||||
*/
|
||||
if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
|
||||
config->dnld_type == ROME_SKIP_EVT_VSE)
|
||||
return qca_inject_cmd_complete_event(hdev);
|
||||
ret = qca_inject_cmd_complete_event(hdev);
|
||||
|
||||
out:
|
||||
release_firmware(fw);
|
||||
@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Give the controller some time to get ready to receive the NVM */
|
||||
msleep(10);
|
||||
|
||||
/* Download NVM configuration */
|
||||
config.type = TLV_TYPE_NVM;
|
||||
if (firmware_name)
|
||||
|
@ -13,6 +13,7 @@
|
||||
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
|
||||
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
|
||||
#define MAX_SIZE_PER_TLV_SEGMENT (243)
|
||||
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
|
||||
|
||||
#define EDL_CMD_REQ_RES_EVT (0x00)
|
||||
#define EDL_PATCH_VER_RES_EVT (0x19)
|
||||
@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
|
||||
const char *firmware_name);
|
||||
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
|
||||
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
|
||||
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
|
||||
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
|
||||
{
|
||||
return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
|
||||
@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif
|
||||
|
@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
|
||||
fw_size = fw->size;
|
||||
|
||||
/* The size of patch header is 30 bytes, should be skip */
|
||||
if (fw_size < 30)
|
||||
if (fw_size < 30) {
|
||||
err = -EINVAL;
|
||||
goto err_release_fw;
|
||||
}
|
||||
|
||||
fw_size -= 30;
|
||||
fw_ptr += 30;
|
||||
|
@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
|
||||
unsigned long flags;
|
||||
struct qca_data *qca = hu->priv;
|
||||
|
||||
BT_DBG("hu %p want to sleep", hu);
|
||||
BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
|
||||
|
||||
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
|
||||
|
||||
@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
|
||||
break;
|
||||
|
||||
case HCI_IBS_RX_ASLEEP:
|
||||
/* Fall through */
|
||||
break;
|
||||
|
||||
default:
|
||||
/* Any other state is illegal */
|
||||
@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
if (hdr->evt == HCI_EV_VENDOR)
|
||||
complete(&qca->drop_ev_comp);
|
||||
|
||||
kfree(skb);
|
||||
kfree_skb(skb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
|
||||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
|
||||
/* Perform pre shutdown command */
|
||||
qca_send_pre_shutdown_cmd(hdev);
|
||||
|
||||
qca_power_shutdown(hu);
|
||||
return 0;
|
||||
}
|
||||
|
@ -55,7 +55,7 @@ static u64 riscv_sched_clock(void)
|
||||
return get_cycles64();
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
|
||||
static struct clocksource riscv_clocksource = {
|
||||
.name = "riscv_clocksource",
|
||||
.rating = 300,
|
||||
.mask = CLOCKSOURCE_MASK(64),
|
||||
@ -92,7 +92,6 @@ void riscv_timer_interrupt(void)
|
||||
static int __init riscv_timer_init_dt(struct device_node *n)
|
||||
{
|
||||
int cpuid, hartid, error;
|
||||
struct clocksource *cs;
|
||||
|
||||
hartid = riscv_of_processor_hartid(n);
|
||||
if (hartid < 0) {
|
||||
@ -112,8 +111,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
|
||||
|
||||
pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
|
||||
__func__, cpuid, hartid);
|
||||
cs = per_cpu_ptr(&riscv_clocksource, cpuid);
|
||||
error = clocksource_register_hz(cs, riscv_timebase);
|
||||
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
|
||||
if (error) {
|
||||
pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
|
||||
error, cpuid);
|
||||
|
@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
}
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
if (ret)
if (ret < 0)
break;
}