Merge 5.9-rc8 into staging-next

We need the IIO fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit a944a1fb9a

4	.mailmap
@@ -169,6 +169,10 @@ Juha Yrjola <juha.yrjola@solidboot.com>
 Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
 Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
 Kay Sievers <kay.sievers@vrfy.org>
+Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
+Kees Cook <keescook@chromium.org> <keescook@google.com>
+Kees Cook <keescook@chromium.org> <kees@outflux.net>
+Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.
	  pgmajfault
		Number of major page faults incurred

-	  workingset_refault
-		Number of refaults of previously evicted pages
+	  workingset_refault_anon
+		Number of refaults of previously evicted anonymous pages.

-	  workingset_activate
-		Number of refaulted pages that were immediately activated
+	  workingset_refault_file
+		Number of refaults of previously evicted file pages.

-	  workingset_restore
-		Number of restored pages which have been detected as an active
-		workingset before they got reclaimed.
+	  workingset_activate_anon
+		Number of refaulted anonymous pages that were immediately
+		activated.
+
+	  workingset_activate_file
+		Number of refaulted file pages that were immediately activated.
+
+	  workingset_restore_anon
+		Number of restored anonymous pages which have been detected as
+		an active workingset before they got reclaimed.
+
+	  workingset_restore_file
+		Number of restored file pages which have been detected as an
+		active workingset before they got reclaimed.

	  workingset_nodereclaim
		Number of times a shadow node has been reclaimed
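For context, a minimal userspace sketch that reads the split counters documented above back out of memory.stat (the cgroup2 mount point /sys/fs/cgroup is an assumption; error handling is trimmed):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char key[64];
	unsigned long long val;
	/* assumed cgroup2 mount point; adjust for your system */
	FILE *f = fopen("/sys/fs/cgroup/memory.stat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		/* print the anon/file-split workingset keys added above */
		if (strncmp(key, "workingset_", 11) == 0)
			printf("%s = %llu\n", key, val);
	}
	fclose(f);
	return 0;
}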
@@ -67,7 +67,7 @@ Parameters::
     the value passed in <key_size>.

 <key_type>
-    Either 'logon' or 'user' kernel key type.
+    Either 'logon', 'user' or 'encrypted' kernel key type.

 <key_description>
     The kernel keyring key description crypt target should look for
@@ -121,6 +121,14 @@ submit_from_crypt_cpus
     thread because it benefits CFQ to have writes submitted using the
     same context.

+no_read_workqueue
+    Bypass dm-crypt internal workqueue and process read requests synchronously.
+
+no_write_workqueue
+    Bypass dm-crypt internal workqueue and process write requests synchronously.
+    This option is automatically enabled for host-managed zoned block devices
+    (e.g. host-managed SMR hard-disks).
+
 integrity:<bytes>:<type>
     The device requires additional <bytes> metadata per-sector stored
     in per-bio integrity structure. This metadata must by provided
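A paraphrased sketch of the decision these two flags short-circuit (illustrative names only, not the driver's actual code -- dm-crypt normally defers crypto and I/O to internal workqueues, and the flags make the matching path synchronous):

#include <stdio.h>

/* hypothetical flag bits standing in for the table options above */
enum { NO_READ_WQ = 1 << 0, NO_WRITE_WQ = 1 << 1 };

static const char *crypt_path(int is_write, unsigned int flags)
{
	int bypass = is_write ? (flags & NO_WRITE_WQ) : (flags & NO_READ_WQ);

	return bypass ? "synchronous (in the submitting context)"
		      : "deferred to the internal workqueue";
}

int main(void)
{
	printf("read:  %s\n", crypt_path(0, NO_WRITE_WQ));
	printf("write: %s\n", crypt_path(1, NO_WRITE_WQ));
	return 0;
}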
@@ -690,7 +690,7 @@ which of the two parameters is added to the kernel command line. In the
 instruction of the CPUs (which, as a rule, suspends the execution of the program
 and causes the hardware to attempt to enter the shallowest available idle state)
 for this purpose, and if ``idle=poll`` is used, idle CPUs will execute a
-more or less ``lightweight'' sequence of instructions in a tight loop. [Note
+more or less "lightweight" sequence of instructions in a tight loop. [Note
 that using ``idle=poll`` is somewhat drastic in many cases, as preventing idle
 CPUs from saving almost any energy at all may not be the only effect of it.
 For example, on Intel hardware it effectively prevents CPUs from using
@@ -182,9 +182,6 @@ in the order of reservations, but only after all previous records where
 already committed. It is thus possible for slow producers to temporarily hold
 off submitted records, that were reserved later.

-Reservation/commit/consumer protocol is verified by litmus tests in
-Documentation/litmus_tests/bpf-rb/_.
-
 One interesting implementation bit, that significantly simplifies (and thus
 speeds up as well) implementation of both producers and consumers is how data
 area is mapped twice contiguously back-to-back in the virtual memory. This
@@ -200,7 +197,7 @@ a self-pacing notifications of new data being availability.
 being available after commit only if consumer has already caught up right up to
 the record being committed. If not, consumer still has to catch up and thus
 will see new data anyways without needing an extra poll notification.
-Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbuf.c_) show that
+Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbufs.c) show that
 this allows to achieve a very high throughput without having to resort to
 tricks like "notify only every Nth sample", which are necessary with perf
 buffer. For extreme cases, when BPF program wants more manual control of
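For context on the double-mapping trick the text above describes, a self-contained userspace sketch of the same idea (the kernel does the equivalent for the ring buffer's data pages; this illustration uses memfd_create and requires glibc >= 2.27):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long sz = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("ring", 0);

	ftruncate(fd, sz);

	/* reserve 2*sz of address space, then map the same page twice */
	char *base = mmap(NULL, 2 * sz, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mmap(base, sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
	mmap(base + sz, sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);

	/* a "record" written across the end of the buffer ... */
	memcpy(base + sz - 4, "wrapped!", 9);
	/* ... reads back contiguously, no wrap-around handling needed */
	printf("%s\n", base + sz - 4);
	return 0;
}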
@@ -23,7 +23,7 @@ properties:
   compatible:
     items:
       - const: raspberrypi,bcm2835-firmware
-      - const: simple-bus
+      - const: simple-mfd

   mboxes:
     $ref: '/schemas/types.yaml#/definitions/phandle'
@@ -57,7 +57,7 @@ required:
 examples:
   - |
     firmware {
-        compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+        compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
         mboxes = <&mailbox>;

         firmware_clocks: clocks {
@@ -67,7 +67,7 @@ examples:

     main_crypto: crypto@4e00000 {
         compatible = "ti,j721-sa2ul";
-        reg = <0x0 0x4e00000 0x0 0x1200>;
+        reg = <0x4e00000 0x1200>;
         power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
         dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
                <&main_udmap 0x4001>;
@@ -145,10 +145,10 @@ examples:

     display@fd4a0000 {
         compatible = "xlnx,zynqmp-dpsub-1.7";
-        reg = <0x0 0xfd4a0000 0x0 0x1000>,
-              <0x0 0xfd4aa000 0x0 0x1000>,
-              <0x0 0xfd4ab000 0x0 0x1000>,
-              <0x0 0xfd4ac000 0x0 0x1000>;
+        reg = <0xfd4a0000 0x1000>,
+              <0xfd4aa000 0x1000>,
+              <0xfd4ab000 0x1000>,
+              <0xfd4ac000 0x1000>;
         reg-names = "dp", "blend", "av_buf", "aud";
         interrupts = <0 119 4>;
         interrupt-parent = <&gic>;
@@ -57,7 +57,7 @@ examples:

     dma: dma-controller@fd4c0000 {
         compatible = "xlnx,zynqmp-dpdma";
-        reg = <0x0 0xfd4c0000 0x0 0x1000>;
+        reg = <0xfd4c0000 0x1000>;
         interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
         interrupt-parent = <&gic>;
         clocks = <&dpdma_clk>;
@@ -20,8 +20,9 @@ Required properties:
 - gpio-controller : Marks the device node as a GPIO controller
 - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
 - interrupt-controller : Mark the GPIO controller as an interrupt-controller
-- ngpios : number of GPIO lines, see gpio.txt
-  (should be multiple of 8, up to 80 pins)
+- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose
+  2 software GPIOs per hardware GPIO: one for hardware input, one for hardware
+  output. Up to 80 pins, must be a multiple of 8.
 - clocks : A phandle to the APB clock for SGPM clock division
 - bus-frequency : SGPM CLK frequency
@@ -30,7 +30,7 @@ properties:
     const: 0

 patternProperties:
-  "^multi-led[0-9a-f]$":
+  "^multi-led@[0-9a-b]$":
     type: object
     allOf:
       - $ref: leds-class-multicolor.yaml#
@@ -1,38 +0,0 @@
-* Sony 1/2.5-Inch 8.51Mp CMOS Digital Image Sensor
-
-The Sony imx274 is a 1/2.5-inch CMOS active pixel digital image sensor with
-an active array size of 3864H x 2202V. It is programmable through I2C
-interface. The I2C address is fixed to 0x1a as per sensor data sheet.
-Image data is sent through MIPI CSI-2, which is configured as 4 lanes
-at 1440 Mbps.
-
-
-Required Properties:
-- compatible: value should be "sony,imx274" for imx274 sensor
-- reg: I2C bus address of the device
-
-Optional Properties:
-- reset-gpios: Sensor reset GPIO
-- clocks: Reference to the input clock.
-- clock-names: Should be "inck".
-- VANA-supply: Sensor 2.8v analog supply.
-- VDIG-supply: Sensor 1.8v digital core supply.
-- VDDL-supply: Sensor digital IO 1.2v supply.
-
-The imx274 device node should contain one 'port' child node with
-an 'endpoint' subnode. For further reading on port node refer to
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-	sensor@1a {
-		compatible = "sony,imx274";
-		reg = <0x1a>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		reset-gpios = <&gpio_sensor 0 0>;
-		port {
-			sensor_out: endpoint {
-				remote-endpoint = <&csiss_in>;
-			};
-		};
-	};
76	Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml	Normal file
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/sony,imx274.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony 1/2.5-Inch 8.51MP CMOS Digital Image Sensor
+
+maintainers:
+  - Leon Luo <leonl@leopardimaging.com>
+
+description: |
+  The Sony IMX274 is a 1/2.5-inch CMOS active pixel digital image sensor with an
+  active array size of 3864H x 2202V. It is programmable through I2C interface.
+  Image data is sent through MIPI CSI-2, which is configured as 4 lanes at 1440
+  Mbps.
+
+properties:
+  compatible:
+    const: sony,imx274
+
+  reg:
+    const: 0x1a
+
+  reset-gpios:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: inck
+
+  vana-supply:
+    description: Sensor 2.8 V analog supply.
+    maxItems: 1
+
+  vdig-supply:
+    description: Sensor 1.8 V digital core supply.
+    maxItems: 1
+
+  vddl-supply:
+    description: Sensor digital IO 1.2 V supply.
+    maxItems: 1
+
+  port:
+    type: object
+    description: Output video port. See ../video-interfaces.txt.
+
+required:
+  - compatible
+  - reg
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        imx274: camera-sensor@1a {
+            compatible = "sony,imx274";
+            reg = <0x1a>;
+            reset-gpios = <&gpio_sensor 0 0>;
+
+            port {
+                sensor_out: endpoint {
+                    remote-endpoint = <&csiss_in>;
+                };
+            };
+        };
+    };
+
+...
@@ -22,7 +22,7 @@
 | nios2:    | TODO |
 | openrisc: | TODO |
 | parisc:   | TODO |
-| powerpc:  |  ok  |
+| powerpc:  | TODO |
 | riscv:    |  ok  |
 | s390:     |  ok  |
 | sh:       | TODO |
@@ -39,10 +39,10 @@ which can help simplify cross compiling. ::
	ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang

 ``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead
-``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For
+``CROSS_COMPILE`` is used to set a command line flag: ``--target=<triple>``. For
 example: ::

-	clang --target aarch64-linux-gnu foo.c
+	clang --target=aarch64-linux-gnu foo.c

 LLVM Utilities
 --------------
@@ -206,6 +206,7 @@ Userspace to kernel:
  ``ETHTOOL_MSG_TSINFO_GET``            get timestamping info
  ``ETHTOOL_MSG_CABLE_TEST_ACT``        action start cable test
  ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``    action start raw TDR cable test
+ ``ETHTOOL_MSG_TUNNEL_INFO_GET``       get tunnel offload info
  ===================================== ================================

 Kernel to userspace:
@@ -239,6 +240,7 @@ Kernel to userspace:
  ``ETHTOOL_MSG_TSINFO_GET_REPLY``      timestamping info
  ``ETHTOOL_MSG_CABLE_TEST_NTF``        Cable test results
  ``ETHTOOL_MSG_CABLE_TEST_TDR_NTF``    Cable test TDR results
+ ``ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY`` tunnel offload info
  ===================================== =================================

 ``GET`` requests are sent by userspace applications to retrieve device
@@ -1363,4 +1365,5 @@ are netlink only.
  ``ETHTOOL_SFECPARAM``               n/a
  n/a                                 ``ETHTOOL_MSG_CABLE_TEST_ACT``
  n/a                                 ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
+ n/a                                 ``ETHTOOL_MSG_TUNNEL_INFO_GET``
  =================================== =====================================
@@ -701,23 +701,6 @@ Memory Consistency Flags
    :stub-columns: 0
    :widths: 3 1 4

-    * .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`:
-
-      - ``V4L2_FLAG_MEMORY_NON_CONSISTENT``
-      - 0x00000001
-      - A buffer is allocated either in consistent (it will be automatically
-	coherent between the CPU and the bus) or non-consistent memory. The
-	latter can provide performance gains, for instance the CPU cache
-	sync/flush operations can be avoided if the buffer is accessed by the
-	corresponding device only and the CPU does not read/write to/from that
-	buffer. However, this requires extra care from the driver -- it must
-	guarantee memory consistency by issuing a cache flush/sync when
-	consistency is needed. If this flag is set V4L2 will attempt to
-	allocate the buffer in non-consistent memory. The flag takes effect
-	only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
-	queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
-	<V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.
-
 .. c:type:: v4l2_memory

 enum v4l2_memory
@@ -120,13 +120,9 @@ than the number requested.
	If you want to just query the capabilities without making any
	other changes, then set ``count`` to 0, ``memory`` to
	``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type.
-    * - __u32
-      - ``flags``
-      - Specifies additional buffer management attributes.
-	See :ref:`memory-flags`.
-
     * - __u32
-      - ``reserved``\ [6]
+      - ``reserved``\ [7]
      - A place holder for future extensions. Drivers and applications
	must set the array to zero.
@@ -112,17 +112,10 @@ aborting or finishing any DMA in progress, an implicit
	``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will
	free any previously allocated buffers, so this is typically something
	that will be done at the start of the application.
-    * - union {
-      - (anonymous)
-    * - __u32
-      - ``flags``
-      - Specifies additional buffer management attributes.
-	See :ref:`memory-flags`.
-    * - __u32
-      - ``reserved``\ [1]
-      - Kept for backwards compatibility. Use ``flags`` instead.
-    * - }
-      -
+    * - __u32
+      - ``reserved``\ [7]
+      - A place holder for future extensions. Drivers and applications
+	must set the array to zero.

 .. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}|
@@ -169,7 +162,6 @@ aborting or finishing any DMA in progress, an implicit
      - This capability is set by the driver to indicate that the queue supports
	cache and memory management hints. However, it's only valid when the
	queue is used for :ref:`memory mapping <mmap>` streaming I/O. See
-	:ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT <V4L2-FLAG-MEMORY-NON-CONSISTENT>`,
	:ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>` and
	:ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>`.
@@ -6173,3 +6173,23 @@ specific interfaces must be consistent, i.e. if one says the feature
 is supported, than the other should as well and vice versa. For arm64
 see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
 For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
+
+8.25 KVM_CAP_S390_DIAG318
+-------------------------
+
+:Architectures: s390
+
+This capability enables a guest to set information about its control program
+(i.e. guest kernel type and version). The information is helpful during
+system/firmware service events, providing additional data about the guest
+environments running on the machine.
+
+The information is associated with the DIAGNOSE 0x318 instruction, which sets
+an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and
+a 7-byte Control Program Version Code (CPVC). The CPNC determines what
+environment the control program is running in (e.g. Linux, z/VM...), and the
+CPVC is used for information specific to OS (e.g. Linux version, Linux
+distribution...)
+
+If this capability is available, then the CPNC and CPVC can be synchronized
+between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318).
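For context, probing the new capability from userspace follows the standard KVM ioctl pattern; a sketch (KVM_CAP_S390_DIAG318 per the text above, so headers older than 5.9 will not define it):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* KVM_CHECK_EXTENSION returns > 0 when the capability is present */
	int has = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_S390_DIAG318);

	printf("KVM_CAP_S390_DIAG318: %s\n", has > 0 ? "available" : "absent");
	return 0;
}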
22	MAINTAINERS
@@ -4374,12 +4374,6 @@ T:	git git://git.infradead.org/users/hch/configfs.git
 F:	fs/configfs/
 F:	include/linux/configfs.h

-CONNECTOR
-M:	Evgeniy Polyakov <zbr@ioremap.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/connector/
-
 CONSOLE SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Supported
@@ -6146,7 +6140,7 @@ F:	Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
 F:	drivers/edac/aspeed_edac.c

 EDAC-BLUEFIELD
-M:	Shravan Kumar Ramani <sramani@nvidia.com>
+M:	Shravan Kumar Ramani <shravankr@nvidia.com>
 S:	Supported
 F:	drivers/edac/bluefield_edac.c
@@ -8301,8 +8295,9 @@ S:	Supported
 F:	drivers/pci/hotplug/rpaphp*

 IBM Power SRIOV Virtual NIC Device Driver
-M:	Thomas Falcon <tlfalcon@linux.ibm.com>
-M:	John Allen <jallen@linux.ibm.com>
+M:	Dany Madden <drt@linux.ibm.com>
+M:	Lijun Pan <ljp@linux.ibm.com>
+M:	Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmvnic.*
@@ -8316,7 +8311,7 @@ F:	arch/powerpc/platforms/powernv/copy-paste.h
 F:	arch/powerpc/platforms/powernv/vas*

 IBM Power Virtual Ethernet Device Driver
-M:	Thomas Falcon <tlfalcon@linux.ibm.com>
+M:	Cristobal Forno <cforno12@linux.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmveth.*
@@ -9222,7 +9217,7 @@ F:	drivers/firmware/iscsi_ibft*

 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:	Sagi Grimberg <sagi@grimberg.me>
-M:	Max Gurtovoy <maxg@nvidia.com>
+M:	Max Gurtovoy <mgurtovoy@nvidia.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.openfabrics.org
@@ -11013,6 +11008,7 @@ F:	drivers/char/hw_random/mtk-rng.c

 MEDIATEK SWITCH DRIVER
 M:	Sean Wang <sean.wang@mediatek.com>
+M:	Landen Chao <Landen.Chao@mediatek.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/dsa/mt7530.*
@@ -12026,6 +12022,7 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:	Documentation/devicetree/bindings/net/
+F:	drivers/connector/
 F:	drivers/net/
 F:	include/linux/etherdevice.h
 F:	include/linux/fcdevice.h
@@ -13156,6 +13153,7 @@ F:	drivers/firmware/pcdp.*

 PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
 M:	Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+M:	Pali Rohár <pali@kernel.org>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -16128,7 +16126,7 @@ M:	Leon Luo <leonl@leopardimaging.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 T:	git git://linuxtv.org/media_tree.git
-F:	Documentation/devicetree/bindings/media/i2c/imx274.txt
+F:	Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml
 F:	drivers/media/i2c/imx274.c

 SONY IMX290 SENSOR DRIVER
2	Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -116,7 +116,6 @@
			switch0: ksz8563@0 {
				compatible = "microchip,ksz8563";
				reg = <0>;
-				phy-mode = "mii";
				reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;

				spi-max-frequency = <500000>;
@@ -140,6 +139,7 @@
						reg = <2>;
						label = "cpu";
						ethernet = <&macb0>;
+						phy-mode = "mii";
						fixed-link {
							speed = <100>;
							full-duplex;
@@ -13,7 +13,7 @@

	soc {
		firmware: firmware {
-			compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
+			compatible = "raspberrypi,bcm2835-firmware", "simple-mfd";
			#address-cells = <1>;
			#size-cells = <1>;
@@ -24,7 +24,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
	imx6_set_lpm(WAIT_UNCLOCKED);
	raw_spin_unlock(&cpuidle_lock);

+	rcu_idle_enter();
	cpu_do_idle();
+	rcu_idle_exit();

	raw_spin_lock(&cpuidle_lock);
	if (num_idle_cpus-- == num_online_cpus())
@@ -44,7 +46,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
		{
			.exit_latency = 50,
			.target_residency = 75,
-			.flags = CPUIDLE_FLAG_TIMER_STOP,
+			.flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE,
			.enter = imx6q_enter_wait,
			.name = "WAIT",
			.desc = "Clock off",
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }

-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }

+/* Always check for S1PTW *before* using this. */
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
 }

 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }

+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)

 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
+	if (kvm_vcpu_abt_iss1tw(vcpu))
+		return true;
+
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;
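The ordering contract introduced above ("always check for S1PTW *before* using this") mirrors what kvm_is_write_fault() now does; a condensed sketch of a correct caller (kernel context assumed, illustrative only):

/* classify an abort as a write: a stage-1 page-table walk (S1PTW) must be
 * tested first, because kvm_vcpu_dabt_iswrite() no longer folds it in */
static bool abort_is_write(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))	/* AF/DBM updates count as writes */
		return true;
	return kvm_vcpu_dabt_iswrite(vcpu);
}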
@@ -298,8 +298,21 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
	case EFI_PERSISTENT_MEMORY:
-		pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
-		return NULL;
+		if (memblock_is_map_memory(phys) ||
+		    !memblock_is_region_memory(phys, size)) {
+			pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
+			return NULL;
+		}
+
+		/*
+		 * Mapping kernel memory is permitted if the region in
+		 * question is covered by a single memblock with the
+		 * NOMAP attribute set: this enables the use of ACPI
+		 * table overrides passed via initramfs, which are
+		 * reserved in memory using arch_reserve_mem_area()
+		 * below. As this particular use case only requires
+		 * read access, fall through to the R/O mapping case.
+		 */
+		fallthrough;

	case EFI_RUNTIME_SERVICES_CODE:
		/*
@@ -388,3 +401,8 @@ int apei_claim_sea(struct pt_regs *regs)

	return err;
 }
+
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+	memblock_mark_nomap(addr, size);
+}
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
-		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
-			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
+		/*
+		 * We need to allow affected CPUs to come in late, but
+		 * also need the non-affected CPUs to be able to come
+		 * in at any point in time. Wonderful.
+		 */
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
	struct pv_time_stolen_time_region *reg;

	reg = per_cpu_ptr(&stolen_time_region, cpu);
-	if (!reg->kaddr) {
-		pr_warn_once("stolen time enabled but not configured for cpu %d\n",
-			     cpu);
+
+	/*
+	 * paravirt_steal_clock() may be called before the CPU
+	 * online notification callback runs. Until the callback
+	 * has run we just return zero.
+	 */
+	if (!reg->kaddr)
		return 0;
-	}

	return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
 }

-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
 {
	struct pv_time_stolen_time_region *reg;
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
	return 0;
 }

-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
 {
	struct pv_time_stolen_time_region *reg;
	struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
	return 0;
 }

-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
 {
	int ret;

-	ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
-				"hypervisor/arm/pvtime:starting",
-				init_stolen_time_cpu, stolen_time_dying_cpu);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"hypervisor/arm/pvtime:online",
+				stolen_time_cpu_online,
+				stolen_time_cpu_down_prepare);
	if (ret < 0)
		return ret;
	return 0;
 }

-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
 {
	struct arm_smccc_res res;
@@ -449,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
		kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
		kvm_vcpu_dabt_isvalid(vcpu) &&
		!kvm_vcpu_abt_issea(vcpu) &&
-		!kvm_vcpu_dabt_iss1tw(vcpu);
+		!kvm_vcpu_abt_iss1tw(vcpu);

	if (valid) {
		int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
		isb();
	}

+	/*
+	 * __load_guest_stage2() includes an ISB only when the AT
+	 * workaround is applied. Take care of the opposite condition,
+	 * ensuring that we always have an ISB, but not two ISBs back
+	 * to back.
+	 */
	__load_guest_stage2(mmu);
+	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }

 static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
@@ -1849,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;

	write_fault = kvm_is_write_fault(vcpu);
-	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -2131,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
		goto out;
	}

-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
		goto out_unlock;
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
	}
 }

-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
 {
-	int to = ctx->offset[bpf_to];
-	/* -1 to account for the Branch instruction */
-	int from = ctx->offset[bpf_from] - 1;
-
-	return to - from;
+	/* BPF JMP offset is relative to the next instruction */
+	bpf_insn++;
+	/*
+	 * Whereas arm64 branch instructions encode the offset
+	 * from the branch itself, so we must subtract 1 from the
+	 * instruction offset.
+	 */
+	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
 }

 static void jit_fill_hole(void *area, unsigned int size)
@@ -642,7 +645,7 @@ emit_bswap_uxt:

	/* JUMP off */
	case BPF_JMP | BPF_JA:
-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
@@ -669,7 +672,7 @@ emit_bswap_uxt:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
 emit_cond_jmp:
-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
	const struct bpf_prog *prog = ctx->prog;
	int i;

+	/*
+	 * - offset[0] offset of the end of prologue,
+	 *   start of the 1st instruction.
+	 * - offset[1] - offset of the end of 1st instruction,
+	 *   start of the 2nd instruction
+	 * [....]
+	 * - offset[3] - offset of the end of 3rd instruction,
+	 *   start of 4th instruction
+	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

+		if (ctx->image == NULL)
+			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
			ctx->offset[i] = ctx->idx;
			continue;
		}
-		if (ctx->image == NULL)
-			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}
+	/*
+	 * offset is allocated with prog->len + 1 so fill in
+	 * the last element with the offset after the last
+	 * instruction (end of program)
+	 */
+	if (ctx->image == NULL)
+		ctx->offset[i] = ctx->idx;

	return 0;
 }
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

-	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
@@ -1089,7 +1108,7 @@ skip_init_ctx:
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
-		bpf_prog_fill_jited_linfo(prog, ctx.offset);
+		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
		kfree(ctx.offset);
		kfree(jit_data);
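A worked example of the fixed branch math, mirroring the new helper (the numbers are illustrative): ctx->offset[i] now holds the arm64 offset of the *start* of BPF insn i, with one extra slot for the end of the program.

/* say BPF insn 5 is "JA +2", ctx->offset[6] == 10 and ctx->offset[8] == 17.
 * The branch is the last arm64 insn of insn 5, i.e. at 10 - 1 == 9, and the
 * target is insn 5 + 1 + 2 == 8, starting at 17: encoded offset 17 - 9 == 8 */
static int bpf2a64_offset_demo(const int *offset, int bpf_insn, int off)
{
	bpf_insn++;	/* BPF JMP offsets are relative to the next insn */
	return offset[bpf_insn + off] - (offset[bpf_insn] - 1);
}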
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }

-#define acpi_unlazy_tlb(x)
-
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMMAP_EARLY, NULL);
+				 MEMINIT_EARLY, NULL);
	return 0;
 }
@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
 {
	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-				 NULL);
+		memmap_init_zone(size, nid, zone, start_pfn,
+				 MEMINIT_EARLY, NULL);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;
@@ -877,6 +877,7 @@ config SNI_RM
	select I8253
	select I8259
	select ISA
+	select MIPS_L1_CACHE_SHIFT_6
	select SWAP_IO_SPACE if CPU_BIG_ENDIAN
	select SYS_HAS_CPU_R4X00
	select SYS_HAS_CPU_R5000
@@ -148,7 +148,7 @@ void __init plat_mem_setup(void)
 {
	struct cpuinfo_mips *c = &current_cpu_data;

-	if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
+	if (c->cputype == CPU_74K) {
		pr_info("Using bcma bus\n");
 #ifdef CONFIG_BCM47XX_BCMA
		bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
	case CPU_34K:
	case CPU_1004K:
	case CPU_74K:
+	case CPU_1074K:
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_INTERAPTIV:
@@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
   endif
 endif

+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
@@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
		if (res)
			goto fault;

-		set_fpr64(current->thread.fpu.fpr,
-			  insn.loongson3_lswc2_format.rt, value);
-		set_fpr64(current->thread.fpu.fpr,
-			  insn.loongson3_lswc2_format.rq, value_next);
+		set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value);
+		set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
		compute_return_epc(regs);
		own_fpu(1);
	}
@@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
			goto sigbus;

		lose_fpu(1);
-		value_next = get_fpr64(current->thread.fpu.fpr,
-				       insn.loongson3_lswc2_format.rq);
+		value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);

		StoreDW(addr + 8, value_next, res);
		if (res)
			goto fault;

-		value = get_fpr64(current->thread.fpu.fpr,
-				  insn.loongson3_lswc2_format.rt);
+		value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0);

		StoreDW(addr, value, res);
		if (res)
@@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
		if (res)
			goto fault;

-		set_fpr64(current->thread.fpu.fpr,
-			  insn.loongson3_lsdc2_format.rt, value);
+		set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
		compute_return_epc(regs);
		own_fpu(1);
@@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
		if (res)
			goto fault;

-		set_fpr64(current->thread.fpu.fpr,
-			  insn.loongson3_lsdc2_format.rt, value);
+		set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value);
		compute_return_epc(regs);
		own_fpu(1);
		break;
@@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
			goto sigbus;

		lose_fpu(1);
-		value = get_fpr64(current->thread.fpu.fpr,
-				  insn.loongson3_lsdc2_format.rt);
+		value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

		StoreW(addr, value, res);
		if (res)
@@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
			goto sigbus;

		lose_fpu(1);
-		value = get_fpr64(current->thread.fpu.fpr,
-				  insn.loongson3_lsdc2_format.rt);
+		value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0);

		StoreDW(addr, value, res);
		if (res)
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
	},
 };

-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
 {
	u32 status = read_c0_status();

@@ -205,12 +208,14 @@ static void a20r_hwint(void)
	int irq;

	clear_c0_status(IE_IRQ0);
-	status = a20r_ack_hwint();
+	status = a20r_update_cause_ip();
	cause = read_c0_cause();

	irq = ffs(((cause & status) >> 8) & 0xf8);
	if (likely(irq > 0))
		do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);

+	a20r_update_cause_ip();
	set_c0_status(IE_IRQ0);
 }
@@ -116,7 +116,6 @@ config PPC
	#
	select ARCH_32BIT_OFF_T if PPC32
	select ARCH_HAS_DEBUG_VIRTUAL
-	select ARCH_HAS_DEBUG_VM_PGTABLE
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FORTIFY_SOURCE
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
 CONFIG_FB_NVIDIA_I2C=y
 CONFIG_FB_RADEON=y
 # CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
 CONFIG_FB_SM501=m
 CONFIG_FB_IBM_GXT4500=y
 CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)

 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-					      phys_addr_t first_memblock_size);
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
 {
-	if (early_radix_enabled())
-		return radix__setup_initial_memory_limit(first_memblock_base,
-						   first_memblock_size);
+	/*
+	 * Hash has more strict restrictions. At this point we don't
+	 * know which translations we will pick. Hence go with hash
+	 * restrictions.
+	 */
	return hash__setup_initial_memory_limit(first_memblock_base,
					   first_memblock_size);
 }
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
	if (!tbl)
		return 0;

-	mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+			tbl->it_page_shift - 1);
	mask += mask - 1;

	return mask;
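Two bugs are fixed in one line here: the old code used "<" (a comparison) where "<<" was intended, and it ignored the page shift, sizing the mask in pages instead of bytes. A worked example of the corrected arithmetic with sample values (fls_long from the kernel's bitops; the geometry is illustrative):

static u64 required_mask_demo(unsigned long pages, unsigned int page_shift)
{
	/* e.g. pages = 0x10000, page_shift = 12: fls_long -> 17, so 1 << 28 */
	u64 mask = 1ULL << (fls_long(pages) + page_shift - 1);

	mask += mask - 1;	/* -> 0x1fffffff, an all-ones mask covering the window */
	return mask;
}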
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE

 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
@@ -111,7 +111,6 @@ SECTIONS
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
	}
 }
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE

 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)

 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
@@ -30,7 +30,7 @@ SECTIONS
	. = ALIGN(16);
	.text : {
		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
-		*(.sfpr)
+		*(.sfpr .glink)
	} :text
	PROVIDE(__etext = .);
	PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
		*(.branch_lt)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
	}
 }
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
	}
 }

-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-				phys_addr_t first_memblock_size)
-{
-	/*
-	 * We don't currently support the first MEMBLOCK not mapping 0
-	 * physical on those processors
-	 */
-	BUG_ON(first_memblock_base != 0);
-
-	/*
-	 * Radix mode is not limited by RMA / VRMA addressing.
-	 */
-	ppc64_rma_size = ULONG_MAX;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

-	if (early_radix_enabled())
+	if (early_radix_enabled()) {
		radix__early_init_devtree();
-	else
+		/*
+		 * We have finalized the translation we are going to use by now.
+		 * Radix mode is not limited by RMA / VRMA addressing.
+		 * Hence don't limit memblock allocations.
+		 */
+		ppc64_rma_size = ULONG_MAX;
+		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+	} else
		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -822,7 +822,7 @@ free_stats:
	kfree(stats);
	return rc ? rc : seq_buf_used(&s);
 }
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);

 static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
@@ -32,6 +32,7 @@ config RISCV
	select ARCH_WANT_FRAME_POINTERS
	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
	select CLONE_BACKWARDS
+	select CLINT_TIMER if !MMU
	select COMMON_CLK
	select EDAC_SUPPORT
	select GENERIC_ARCH_TOPOLOGY if SMP
@@ -95,10 +95,12 @@
			#clock-cells = <1>;
		};

-		clint0: interrupt-controller@2000000 {
+		clint0: clint@2000000 {
			#interrupt-cells = <1>;
			compatible = "riscv,clint0";
			reg = <0x2000000 0xC000>;
-			interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+			interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+					       &cpu1_intc 3 &cpu1_intc 7>;
			clocks = <&sysctl K210_CLK_ACLK>;
		};
26	arch/riscv/include/asm/clint.h	Normal file
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
@@ -66,6 +66,13 @@ do {									\
 * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
 */
 #define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
 #endif

 #endif /* _ASM_RISCV_FTRACE_H */
@@ -5,7 +5,6 @@

 #include <linux/random.h>
 #include <linux/version.h>
-#include <asm/timex.h>

 extern unsigned long __stack_chk_guard;
@@ -18,12 +17,9 @@ extern unsigned long __stack_chk_guard;
 static __always_inline void boot_init_stack_canary(void)
 {
	unsigned long canary;
-	unsigned long tsc;

	/* Try to get a semi random initial value. */
	get_random_bytes(&canary, sizeof(canary));
-	tsc = get_cycles();
-	canary += tsc + (tsc << BITS_PER_LONG/2);
	canary ^= LINUX_VERSION_CODE;
	canary &= CANARY_MASK;
@@ -10,6 +10,44 @@

 typedef unsigned long cycles_t;

+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+	return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+	return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+	return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Unfortunately we don't have a fallback, so instead
+ * we just return 0.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+	if (unlikely(clint_time_val == NULL))
+		return 0;
+	return get_cycles();
+}
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
 static inline cycles_t get_cycles(void)
 {
	return csr_read(CSR_TIME);
@@ -41,6 +79,8 @@ static inline u64 get_cycles64(void)
 }
 #endif /* CONFIG_64BIT */

+#endif /* !CONFIG_RISCV_M_MODE */
+
 #define ARCH_HAS_READ_CURRENT_TIMER
 static inline int read_current_timer(unsigned long *timer_val)
 {
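For context on why the 32-bit path exposes a separate get_cycles_hi(): a tear-free 64-bit mtime read on rv32 has to be composed from two 32-bit reads with a retry loop. A sketch built from the accessors above (not part of this diff):

static inline u64 get_cycles64_rv32(void)
{
	u32 hi, lo;

	do {
		hi = get_cycles_hi();
		lo = get_cycles();
		/* retry if the high word rolled over between the reads */
	} while (get_cycles_hi() != hi);

	return ((u64)hi << 32) | lo;
}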
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
	return __ftrace_modify_call(rec->ip, addr, false);
 }

+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+	int out;
+
+	ftrace_arch_code_modify_prepare();
+	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	ftrace_arch_code_modify_post_process();
+
+	return out;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)

	ptep = &fixmap_pte[pte_index(addr)];

-	if (pgprot_val(prot)) {
+	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
-	} else {
+	else
		pte_clear(&init_mm, addr, ptep);
-		local_flush_tlb_page(addr);
-	}
+	local_flush_tlb_page(addr);
 }

 static pte_t *__init get_pte_virt(phys_addr_t pa)
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)

 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-	return (p4d_t *) pgd;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
 }
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+	return p4d_offset_lockless(pgdp, *pgdp, address);
+}

-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-	return (pud_t *) p4d;
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
 }
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
+}
 #define pud_offset pud_offset

-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-	return (pmd_t *) pud;
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
 }
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+	return pmd_offset_lockless(pudp, *pudp, address);
+}
 #define pmd_offset pmd_offset
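The point of the *_lockless variants is that the upper-level entry is read once and the *value* is passed down, so a concurrent entry update cannot be observed half-way through the walk -- the pattern the lockless fast path in mm/gup.c relies on. A condensed sketch of a caller (kernel context assumed, illustrative only):

static p4d_t *p4d_lookup_lockless(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	pgd_t pgd = READ_ONCE(*pgdp);	/* single snapshot of the entry */

	if (pgd_none(pgd))
		return NULL;
	return p4d_offset_lockless(pgdp, pgd, addr);
}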
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
 void do_secure_storage_access(struct pt_regs *regs);
 void do_non_secure_storage_access(struct pt_regs *regs);
+void do_secure_storage_violation(struct pt_regs *regs);

 void addressing_exception(struct pt_regs *regs);
 void data_exception(struct pt_regs *regs);
@@ -39,14 +39,13 @@ void enabled_wait(void)
	local_irq_restore(flags);

-	/* Account time spent with enabled wait psw loaded as idle time. */
-	write_seqcount_begin(&idle->seqcount);
+	/* XXX seqcount has tracepoints that require RCU */
+	raw_write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
-	write_seqcount_end(&idle->seqcount);
+	raw_write_seqcount_end(&idle->seqcount);
 }
 NOKPROBE_SYMBOL(enabled_wait);
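For context, the raw_ variants above only drop the writer-side tracepoints (which are unsafe in this RCU-idle window); the read protocol is unchanged. A sketch of the matching reader side (kernel context assumed, illustrative only):

static u64 read_idle_time(struct s390_idle_data *idle)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		val = READ_ONCE(idle->idle_time);
	} while (read_seqcount_retry(&idle->seqcount, seq));

	return val;
}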
@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception)		/* 3b */
 PGM_CHECK_DEFAULT			/* 3c */
 PGM_CHECK(do_secure_storage_access)	/* 3d */
 PGM_CHECK(do_non_secure_storage_access)	/* 3e */
-PGM_CHECK_DEFAULT			/* 3f */
+PGM_CHECK(do_secure_storage_violation)	/* 3f */
 PGM_CHECK(monitor_event_exception)	/* 40 */
 PGM_CHECK_DEFAULT			/* 41 */
 PGM_CHECK_DEFAULT			/* 42 */
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
 /*
  * Make sure that the area behind memory_end is protected
  */
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
 {
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
 /*
  * Make sure that oldmem, where the dump is stored, is protected
  */
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
 /*
  * Make sure that oldmem, where the dump is stored, is protected
  */
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_non_secure_storage_access);

+void do_secure_storage_violation(struct pt_regs *regs)
+{
+	/*
+	 * Either KVM messed up the secure guest mapping or the same
+	 * page is mapped into multiple secure guests.
+	 *
+	 * This exception is only triggered when a guest 2 is running
+	 * and can therefore never occur in kernel context.
+	 */
+	printk_ratelimited(KERN_WARNING
+			   "Secure storage violation in task: %s, pid %d\n",
+			   current->comm, current->pid);
+	send_sig(SIGSEGV, current, 0);
+}
+
 #else
 void do_secure_storage_access(struct pt_regs *regs)
 {
@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
 {
	default_trap_handler(regs);
 }
+
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
 #endif
@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
 int zpci_disable_device(struct zpci_dev *zdev)
 {
	zpci_dma_exit_device(zdev);
+	/*
+	 * The zPCI function may already be disabled by the platform, this is
+	 * detected in clp_disable_fh() which becomes a no-op.
+	 */
	return clp_disable_fh(zdev);
 }
 EXPORT_SYMBOL_GPL(zpci_disable_device);
@@ -143,6 +143,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
			zpci_remove_device(zdev);
		}

+		zdev->fh = ccdf->fh;
+		zpci_disable_device(zdev);
		zdev->state = ZPCI_FN_STATE_STANDBY;
		if (!clp_get_state(ccdf->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED) {
@@ -8,7 +8,6 @@

 #ifdef CONFIG_SMP

-#include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <asm/current.h>
 #include <asm/percpu.h>
@@ -370,7 +370,6 @@ syscall_trace_entry:
 	 nop
 	cmp/eq	#-1, r0
 	bt	syscall_exit
-	mov.l	r0, @(OFF_R0,r15)	! Save return value
 	!			Reload R0-R4 from kernel stack, where the
 	!			parent may have modified them using
 	!			ptrace(POKEUSR).  (Note that R0-R2 are
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	long ret = 0;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
-		/*
-		 * Tracing decided this syscall should not happen.
-		 * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->regs[0].
-		 */
-		ret = -1L;
+	    tracehook_report_syscall_entry(regs)) {
+		regs->regs[0] = -ENOSYS;
+		return -1;
+	}
 
 	if (secure_computing() == -1)
 		return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 	audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
 			    regs->regs[6], regs->regs[7]);
 
-	return ret ?: regs->regs[0];
+	return 0;
 }
 
 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
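The ptrace rework above changes how a traced syscall is aborted on SH: the kernel now writes -ENOSYS into the return-value register itself and returns -1 so the syscall is skipped. A minimal userspace sketch of the tracer side (written for x86-64 rather than SH, error handling elided) shows the same mechanism from the other end: cancelling a child's syscall at entry and observing -ENOSYS at exit.

	/* Hypothetical tracer sketch (x86-64 host): cancel a traced child's
	 * getpid() by overwriting the syscall number with -1 at entry. */
	#include <signal.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/syscall.h>
	#include <sys/user.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			ptrace(PTRACE_TRACEME, 0, 0, 0);
			raise(SIGSTOP);
			getpid();		/* this syscall gets cancelled */
			return 0;
		}

		waitpid(pid, 0, 0);		/* child stopped itself */

		struct user_regs_struct regs;

		/* run to the entry of getpid(), skipping other syscall stops */
		for (;;) {
			ptrace(PTRACE_SYSCALL, pid, 0, 0);
			waitpid(pid, 0, 0);
			ptrace(PTRACE_GETREGS, pid, 0, &regs);
			if (regs.orig_rax == SYS_getpid)
				break;
		}

		regs.orig_rax = -1;		/* invalid number => -ENOSYS */
		ptrace(PTRACE_SETREGS, pid, 0, &regs);

		ptrace(PTRACE_SYSCALL, pid, 0, 0);	/* run to syscall exit */
		waitpid(pid, 0, 0);
		ptrace(PTRACE_GETREGS, pid, 0, &regs);
		printf("syscall returned %lld\n", (long long)regs.rax);  /* -38 */

		ptrace(PTRACE_CONT, pid, 0, 0);
		return 0;
	}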
@@ -43,6 +43,8 @@ KBUILD_CFLAGS += -Wno-pointer-sign
 KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+# CONFIG_64BIT is not set
 CONFIG_SMP=y
 CONFIG_X86_GENERIC=y
 CONFIG_HPET_TIMER=y
@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 	instrumentation_end();
 
 	set_irq_regs(old_regs);
@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
  * rdx: Function argument (can be NULL if none)
  */
 SYM_FUNC_START(asm_call_on_stack)
+SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
+SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 	/*
 	 * Save the frame pointer unconditionally. This allows the ORC
 	 * unwinder to handle the stack switch.
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
-#define acpi_unlazy_tlb(x)	leave_mm(x)
-
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
@@ -60,12 +60,26 @@
 #define FRAME_END "pop %" _ASM_BP "\n"
 
 #ifdef CONFIG_X86_64
+
 #define ENCODE_FRAME_POINTER			\
 	"lea 1(%rsp), %rbp\n\t"
 
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return (unsigned long)regs + 1;
+}
+
 #else /* !CONFIG_X86_64 */
+
 #define ENCODE_FRAME_POINTER			\
 	"movl %esp, %ebp\n\t"			\
 	"andl $0x7fffffff, %ebp\n\t"
 
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return (unsigned long)regs & 0x7fffffff;
+}
+
 #endif /* CONFIG_X86_64 */
 
 #endif /* __ASSEMBLY__ */
@@ -83,6 +97,11 @@
 
 #define ENCODE_FRAME_POINTER
 
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+	return 0;
+}
+
 #endif
 
 #define FRAME_BEGIN
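encode_frame_pointer() gives C code (copy_thread() later in this diff) the same marking scheme the asm ENCODE_FRAME_POINTER macro already used: a deliberately misaligned frame pointer tells the ORC unwinder that the "frame" is really a struct pt_regs. A standalone sketch of the decode side, assuming only the alignment convention shown above (this is an illustration, not the kernel's actual unwinder code):

	#include <stdint.h>

	struct pt_regs;	/* opaque here */

	/* On x86-64 a genuine frame pointer is 8-byte aligned, so an odd
	 * %rbp value can only mean "this frame encodes a pt_regs pointer". */
	static inline struct pt_regs *decode_frame_pointer(uintptr_t bp)
	{
		if (bp & 1)			/* low bit set => encoded */
			return (struct pt_regs *)(bp - 1);
		return 0;			/* ordinary stack frame */
	}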
@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs)			\
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
-	run_on_irqstack_cond(__##func, regs, regs);			\
+	run_sysvec_on_irqstack_cond(__##func, regs);			\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
 	irqentry_exit(regs, state);					\
@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
 	return __this_cpu_read(irq_count) != -1;
 }
 
-void asm_call_on_stack(void *sp, void *func, void *arg);
+void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
+			      struct pt_regs *regs);
+void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
+			   struct irq_desc *desc);
 
-static __always_inline void __run_on_irqstack(void *func, void *arg)
+static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
 	__this_cpu_add(irq_count, 1);
-	asm_call_on_stack(tos - 8, func, arg);
+	asm_call_on_stack(tos - 8, func, NULL);
 	__this_cpu_sub(irq_count, 1);
 }
 
+static __always_inline void
+__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+			 struct pt_regs *regs)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_sysvec_on_stack(tos - 8, func, regs);
+	__this_cpu_sub(irq_count, 1);
+}
+
+static __always_inline void
+__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+		      struct irq_desc *desc)
+{
+	void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+	__this_cpu_add(irq_count, 1);
+	asm_call_irq_on_stack(tos - 8, func, desc);
+	__this_cpu_sub(irq_count, 1);
+}
+
 #else /* CONFIG_X86_64 */
 static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void *func, void *arg) { }
+static inline void __run_on_irqstack(void (*func)(void)) { }
+static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
+					    struct pt_regs *regs) { }
+static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
+					 struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
 	return !user_mode(regs) && !irqstack_active();
 }
 
-static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+static __always_inline void run_on_irqstack_cond(void (*func)(void),
 						 struct pt_regs *regs)
 {
-	void (*__func)(void *arg) = func;
-
 	lockdep_assert_irqs_disabled();
 
 	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(__func, arg);
+		__run_on_irqstack(func);
 	else
-		__func(arg);
+		func();
 }
 
+static __always_inline void
+run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
+			    struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_sysvec_on_irqstack(func, regs);
+	else
+		func(regs);
+}
+
+static __always_inline void
+run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
+			 struct pt_regs *regs)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (irq_needs_irq_stack(regs))
+		__run_irq_on_irqstack(func, desc);
+	else
+		func(desc);
+}
+
 #endif
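The rework above replaces `void *` callback plumbing with properly typed function pointers, so a mismatched callback becomes a compile-time error instead of a latent crash. Illustration only (not kernel code) of the hazard the typed variants remove:

	#include <stdio.h>

	static void takes_int_ptr(int *p) { printf("%d\n", *p); }

	static void call_untyped(void *func, void *arg)
	{
		/* Compiles no matter what 'func' really expects; the cast
		 * works on common ABIs but the compiler can no longer help. */
		void (*f)(void *) = func;
		f(arg);
	}

	static void call_typed(void (*func)(int *), int *arg)
	{
		/* A callback with the wrong signature is now rejected
		 * at compile time. */
		func(arg);
	}

	int main(void)
	{
		int x = 42;
		call_untyped((void *)takes_int_ptr, &x);	/* unchecked */
		call_typed(takes_int_ptr, &x);			/* checked */
		return 0;
	}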
@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
 	legacy_pic->init(0);
 	legacy_pic->make_irq(0);
 	apic_write(APIC_LVT0, APIC_DM_EXTINT);
+	legacy_pic->unmask(0);
 
 	unlock_ExtINT_logic();
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 				       struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_X86_64))
-		run_on_irqstack_cond(desc->handle_irq, desc, regs);
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
 	else
 		__handle_irq(desc, regs);
 }
@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL, NULL);
+	run_on_irqstack_cond(__do_softirq, NULL);
 }
@@ -652,6 +652,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (pv_tlb_flush_supported()) {
 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 		pr_info("KVM setup pv remote TLB flush\n");
 	}
@@ -764,14 +765,6 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static void kvm_free_pv_cpu_mask(void)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
-}
-
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
@@ -790,20 +783,11 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			if (!zalloc_cpumask_var_node(
-				per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu))) {
-				goto zalloc_cpumask_fail;
-			}
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+						GFP_KERNEL, cpu_to_node(cpu));
 		}
 
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 	return 0;
-
-zalloc_cpumask_fail:
-	kvm_free_pv_cpu_mask();
-	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);
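The hunks above revert the allocation-failure handling around the per-CPU `__pv_cpu_mask` allocations. As background (my gloss, not stated in the diff): in the common CONFIG_CPUMASK_OFFSTACK=n configuration a cpumask_var_t embeds its storage, so zalloc_cpumask_var_node() reduces to zeroing and cannot fail. A simplified model of that configuration:

	#include <stdbool.h>
	#include <string.h>

	#define NR_CPUS 64

	typedef struct { unsigned long bits[NR_CPUS / 64]; } cpumask_t;

	/* CONFIG_CPUMASK_OFFSTACK=n: the mask is the struct itself,
	 * so "allocating" one is just zeroing already-present storage. */
	typedef cpumask_t cpumask_var_t[1];

	static inline bool zalloc_cpumask_var_model(cpumask_var_t mask)
	{
		memset(mask, 0, sizeof(*mask));
		return true;		/* can never fail in this config */
	}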
@@ -42,6 +42,7 @@
 #include <asm/spec-ctrl.h>
 #include <asm/io_bitmap.h>
 #include <asm/proto.h>
+#include <asm/frame.h>
 
 #include "process.h"
 
@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
 
-	frame->bp = 0;
+	frame->bp = encode_frame_pointer(childregs);
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
 	p->thread.io_bitmap = NULL;
@@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int invd_interception(struct vcpu_svm *svm)
+{
+	/* Treat an INVD instruction as a NOP and just skip it. */
+	return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
 static int invlpg_interception(struct vcpu_svm *svm)
 {
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_RDPMC]			= rdpmc_interception,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]				= iret_interception,
-	[SVM_EXIT_INVD]				= emulate_on_interception,
+	[SVM_EXIT_INVD]				= invd_interception,
 	[SVM_EXIT_PAUSE]			= pause_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #endif
 
+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
 #define KVM_VM_CR0_ALWAYS_ON				\
@@ -791,6 +794,18 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	 */
 	if (is_guest_mode(vcpu))
 		eb |= get_vmcs12(vcpu)->exception_bitmap;
+	else {
+		/*
+		 * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
+		 * between guest and host. In that case we only care about present
+		 * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
+		 * prepare_vmcs02_rare.
+		 */
+		bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
+		int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
+		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+	}
 
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -4352,16 +4367,6 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 		vmx->pt_desc.guest.output_mask = 0x7F;
 		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
 	}
-
-	/*
-	 * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
-	 * between guest and host. In that case we only care about present
-	 * faults.
-	 */
-	if (enable_ept) {
-		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK);
-		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK);
-	}
 }
 
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -4803,6 +4808,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
 		 * EPT will cause page fault only if we need to
 		 * detect illegal GPAs.
 		 */
+		WARN_ON_ONCE(!allow_smaller_maxphyaddr);
 		kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
 		return 1;
 	} else
@@ -5331,7 +5337,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	 * would also use advanced VM-exit information for EPT violations to
 	 * reconstruct the page fault error code.
 	 */
-	if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+	if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
 		return kvm_emulate_instruction(vcpu, 0);
 
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -8305,10 +8311,11 @@ static int __init vmx_init(void)
 	vmx_check_vmcs12_offsets();
 
 	/*
-	 * Intel processors don't have problems with
-	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
-	 * it for VMX by default
+	 * Shadow paging doesn't have a (further) performance penalty
+	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+	 * by default
 	 */
-	allow_smaller_maxphyaddr = true;
+	if (!enable_ept)
+		allow_smaller_maxphyaddr = true;
 
 	return 0;
@@ -552,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 
 static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 {
-	return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+	if (!enable_ept)
+		return true;
+
+	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
 }
 
 void dump_vmcs(void);
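A standalone illustration (not kernel code) of the intercept decision above: #PF exits are always needed under shadow paging, and otherwise only when the admin has opted in to emulating a guest MAXPHYADDR smaller than the host's.

	#include <stdbool.h>
	#include <stdio.h>

	static bool need_pf_intercept(bool ept, bool allow_smaller,
				      int guest_maxphyaddr, int host_maxphyaddr)
	{
		if (!ept)
			return true;	/* shadow paging always needs #PF exits */
		return allow_smaller && guest_maxphyaddr < host_maxphyaddr;
	}

	int main(void)
	{
		/* EPT on, guest 46-bit vs host 52-bit physical addresses */
		printf("%d\n", need_pf_intercept(true, false, 46, 52));	/* 0 */
		printf("%d\n", need_pf_intercept(true, true,  46, 52));	/* 1 */
		return 0;
	}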
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
 u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);
 
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 static u64 __read_mostly host_xss;
@@ -976,6 +976,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP;
+	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
@@ -1003,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (kvm_x86_ops.set_cr4(vcpu, cr4))
 		return 1;
 
-	if (((cr4 ^ old_cr4) & pdptr_bits) ||
+	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
@@ -3221,9 +3222,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_POWER_CTL:
 		msr_info->data = vcpu->arch.msr_ia32_power_ctl;
 		break;
-	case MSR_IA32_TSC:
-		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
+	case MSR_IA32_TSC: {
+		/*
+		 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
+		 * even when not intercepted. AMD manual doesn't explicitly
+		 * state this but appears to behave the same.
+		 *
+		 * On userspace reads and writes, however, we unconditionally
+		 * operate L1's TSC value to ensure backwards-compatible
+		 * behavior for migration.
+		 */
+		u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
+							    vcpu->arch.tsc_offset;
+
+		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
 		break;
+	}
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
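Toy arithmetic for the MSR_IA32_TSC change above, assuming a 1:1 TSC scaling ratio: what a guest reads is the host TSC plus the active offset, and while a nested guest (L2) runs that offset already includes L1's contribution. A migrating VMM therefore needs L1's view, which is why the host-initiated path selects l1_tsc_offset.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t host_tsc      = 1000000;
		uint64_t l1_tsc_offset = 500;	/* offset L0 programs for L1 */
		uint64_t l2_extra      = 70;	/* offset L1 adds for L2 */
		uint64_t active_offset = l1_tsc_offset + l2_extra;

		/* what L2 sees on rdmsr while it is running */
		printf("L2 guest rdmsr: %llu\n",
		       (unsigned long long)(host_tsc + active_offset));	/* 1000570 */
		/* what a migrating VMM should capture: L1's TSC value */
		printf("host-initiated: %llu\n",
		       (unsigned long long)(host_tsc + l1_tsc_offset));	/* 1000500 */
		return 0;
	}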
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
@@ -1412,6 +1412,11 @@ out:
 
 	hctx->dispatched[queued_to_index(queued)]++;
 
+	/* If we didn't flush the entire list, we could have told the driver
+	 * there was more coming, but that turned out to be a lie.
+	 */
+	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+		q->mq_ops->commit_rqs(hctx);
 	/*
 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
 	 * that is where we will continue on next queue run.
@@ -1425,14 +1430,6 @@ out:
 
 		blk_mq_release_budgets(q, nr_budgets);
 
-		/*
-		 * If we didn't flush the entire list, we could have told
-		 * the driver there was more coming, but that turned out to
-		 * be a lie.
-		 */
-		if (q->mq_ops->commit_rqs && queued)
-			q->mq_ops->commit_rqs(hctx);
-
 		spin_lock(&hctx->lock);
 		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
@@ -2079,6 +2076,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
+	int errors = 0;
 
 	while (!list_empty(list)) {
 		blk_status_t ret;
@@ -2095,6 +2093,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 				break;
 			}
 			blk_mq_end_request(rq, ret);
+			errors++;
 		} else
 			queued++;
 	}
@@ -2104,7 +2103,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued)
+	if ((!list_empty(list) || errors) &&
+			hctx->queue->mq_ops->commit_rqs && queued)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
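The blk-mq fixes above make the core call ->commit_rqs() whenever it queued some requests but stopped before reaching the end of the list, including when later requests failed. A sketch of the driver-side contract being honored (hypothetical driver, not a real one): drivers that batch doorbell writes only ring the doorbell on the last request of a batch or in ->commit_rqs(), so skipping that call would strand queued requests.

	#include <linux/blk-mq.h>

	static void mydrv_ring_doorbell(struct blk_mq_hw_ctx *hctx)
	{
		/* write the hardware doorbell register here */
	}

	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		/* ... place bd->rq on the hardware ring ... */
		if (bd->last)
			mydrv_ring_doorbell(hctx);	/* batch flushed here ... */
		return BLK_STS_OK;
	}

	static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
	{
		mydrv_ring_doorbell(hctx);		/* ... or here */
	}

	static const struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq   = mydrv_queue_rq,
		.commit_rqs = mydrv_commit_rqs,
	};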
@@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+/**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk:	the gendisk of the queue to configure
+ * @model:	the zoned model to set
+ *
+ * Set the zoned model of the request queue of @disk according to @model.
+ * When @model is BLK_ZONED_HM (host managed), this should be called only
+ * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+ * If @model specifies BLK_ZONED_HA (host aware), the effective model used
+ * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+ * on the disk.
+ */
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+	switch (model) {
+	case BLK_ZONED_HM:
+		/*
+		 * Host managed devices are supported only if
+		 * CONFIG_BLK_DEV_ZONED is enabled.
+		 */
+		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+		break;
+	case BLK_ZONED_HA:
+		/*
+		 * Host aware devices can be treated either as regular block
+		 * devices (similar to drive managed devices) or as zoned block
+		 * devices to take advantage of the zone command set, similarly
+		 * to host managed devices. We try the latter if there are no
+		 * partitions and zoned block device support is enabled, else
+		 * we do nothing special as far as the block layer is concerned.
+		 */
+		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+		    disk_has_partitions(disk))
+			model = BLK_ZONED_NONE;
+		break;
+	case BLK_ZONED_NONE:
+	default:
+		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+			model = BLK_ZONED_NONE;
+		break;
+	}
+
+	disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
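A hypothetical usage sketch for the helper added above: an SMR disk driver reporting its zoned model to the block layer. For a host-aware drive the effective model may silently fall back to BLK_ZONED_NONE when CONFIG_BLK_DEV_ZONED is off or the disk is partitioned, exactly as the kernel-doc describes.

	#include <linux/blkdev.h>

	static void mydisk_probe_zoned(struct gendisk *disk, bool host_managed)
	{
		if (host_managed)
			blk_queue_set_zoned(disk, BLK_ZONED_HM);
		else
			blk_queue_set_zoned(disk, BLK_ZONED_HA);
	}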
@@ -161,18 +161,10 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
 }
 
 /* Power(C) State timer broadcast control */
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-					struct acpi_processor_cx *cx,
-					int broadcast)
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+					struct acpi_processor_cx *cx)
 {
-	int state = cx - pr->power.states;
-
-	if (state >= pr->power.timer_broadcast_on_state) {
-		if (broadcast)
-			tick_broadcast_enter();
-		else
-			tick_broadcast_exit();
-	}
+	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
 }
 
 #else
@@ -180,10 +172,11 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 				    struct acpi_processor_cx *cstate) { }
 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
-					struct acpi_processor_cx *cx,
-					int broadcast)
+
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+					struct acpi_processor_cx *cx)
 {
+	return false;
 }
 
 #endif
@@ -566,32 +559,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @drv: cpuidle driver
  * @pr: Target processor
  * @cx: Target state context
- * @timer_bc: Whether or not to change timer mode to broadcast
+ * @index: index of target state
  */
-static void acpi_idle_enter_bm(struct acpi_processor *pr,
-			       struct acpi_processor_cx *cx, bool timer_bc)
+static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
+			      struct acpi_processor *pr,
+			      struct acpi_processor_cx *cx,
+			      int index)
 {
-	acpi_unlazy_tlb(smp_processor_id());
-
-	/*
-	 * Must be done before busmaster disable as we might need to
-	 * access HPET !
-	 */
-	if (timer_bc)
-		lapic_timer_state_broadcast(pr, cx, 1);
+	static struct acpi_processor_cx safe_cx = {
+		.entry_method = ACPI_CSTATE_HALT,
+	};
 
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
 	 * bm_control implies whether we can do ARB_DIS
 	 *
-	 * That leaves a case where bm_check is set and bm_control is
-	 * not set. In that case we cannot do much, we enter C3
-	 * without doing anything.
+	 * That leaves a case where bm_check is set and bm_control is not set.
+	 * In that case we cannot do much, we enter C3 without doing anything.
 	 */
-	if (pr->flags.bm_control) {
+	bool dis_bm = pr->flags.bm_control;
+
+	/* If we can skip BM, demote to a safe state. */
+	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+		dis_bm = false;
+		index = drv->safe_state_index;
+		if (index >= 0) {
+			cx = this_cpu_read(acpi_cstate[index]);
+		} else {
+			cx = &safe_cx;
+			index = -EBUSY;
+		}
+	}
+
+	if (dis_bm) {
 		raw_spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
@@ -600,18 +604,21 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 		raw_spin_unlock(&c3_lock);
 	}
 
+	rcu_idle_enter();
+
 	acpi_idle_do_entry(cx);
 
+	rcu_idle_exit();
+
 	/* Re-enable bus master arbitration */
-	if (pr->flags.bm_control) {
+	if (dis_bm) {
 		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		raw_spin_unlock(&c3_lock);
 	}
 
-	if (timer_bc)
-		lapic_timer_state_broadcast(pr, cx, 0);
+	return index;
 }
 
 static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -625,32 +632,21 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 		return -EINVAL;
 
 	if (cx->type != ACPI_STATE_C1) {
+		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+			return acpi_idle_enter_bm(drv, pr, cx, index);
+
+		/* C2 to C1 demotion. */
 		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 			index = ACPI_IDLE_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
-		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
-			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-				acpi_idle_enter_bm(pr, cx, true);
-				return index;
-			} else if (drv->safe_state_index >= 0) {
-				index = drv->safe_state_index;
-				cx = per_cpu(acpi_cstate[index], dev->cpu);
-			} else {
-				acpi_safe_halt();
-				return -EBUSY;
-			}
 		}
 	}
 
-	lapic_timer_state_broadcast(pr, cx, 1);
-
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
 	acpi_idle_do_entry(cx);
 
-	lapic_timer_state_broadcast(pr, cx, 0);
-
 	return index;
 }
 
@@ -666,7 +662,13 @@ static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
 		return 0;
 
 	if (pr->flags.bm_check) {
-		acpi_idle_enter_bm(pr, cx, false);
+		u8 bm_sts_skip = cx->bm_sts_skip;
+
+		/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
+		cx->bm_sts_skip = 1;
+		acpi_idle_enter_bm(drv, pr, cx, index);
+		cx->bm_sts_skip = bm_sts_skip;
+
 		return 0;
 	} else {
 		ACPI_FLUSH_CPU_CACHE();
@@ -682,11 +684,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 {
 	int i, count = ACPI_IDLE_STATE_START;
 	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
 
 	if (max_cstate == 0)
 		max_cstate = 1;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		state = &acpi_idle_driver.states[count];
 		cx = &pr->power.states[i];
 
 		if (!cx->valid)
@@ -694,6 +698,15 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
 
 		per_cpu(acpi_cstate[count], dev->cpu) = cx;
 
+		if (lapic_timer_needs_broadcast(pr, cx))
+			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+		if (cx->type == ACPI_STATE_C3) {
+			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+			if (pr->flags.bm_check)
+				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+		}
+
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
 			break;
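Instead of toggling the broadcast timer by hand around every deep C-state entry, the driver above now tags qualifying states with CPUIDLE_FLAG_TIMER_STOP at registration time and lets the cpuidle core manage the switch. A minimal sketch of a hypothetical driver declaring such a state:

	#include <linux/cpuidle.h>

	static int mydrv_enter_deep(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
	{
		/* enter the hardware idle state here */
		return index;
	}

	static struct cpuidle_driver mydrv_idle_driver = {
		.name = "mydrv_idle",
		.states = {
			[0] = {
				.name		  = "DEEP",
				.desc		  = "deep state, LAPIC timer stops",
				.exit_latency	  = 100,
				.target_residency = 400,
				/* core switches to the broadcast timer
				 * around entry/exit of this state */
				.flags		  = CPUIDLE_FLAG_TIMER_STOP,
				.enter		  = mydrv_enter_deep,
			},
		},
		.state_count = 1,
	};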
@@ -2224,7 +2224,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
 
 	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (rc < 0)
-		goto out;
+		goto err_disable;
 
 	rc = -ENOMEM;
 	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
@@ -761,41 +761,10 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 	return pfn_to_nid(pfn);
 }
 
-/* register memory section under specified node if it spans that node */
-static int register_mem_sect_under_node(struct memory_block *mem_blk,
-					 void *arg)
+static int do_register_memory_block_under_node(int nid,
+					       struct memory_block *mem_blk)
 {
-	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
-	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
-	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
-	int ret, nid = *(int *)arg;
-	unsigned long pfn;
-
-	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
-		int page_nid;
-
-		/*
-		 * memory block could have several absent sections from start.
-		 * skip pfn range from absent section
-		 */
-		if (!pfn_in_present_section(pfn)) {
-			pfn = round_down(pfn + PAGES_PER_SECTION,
-					 PAGES_PER_SECTION) - 1;
-			continue;
-		}
-
-		/*
-		 * We need to check if page belongs to nid only for the boot
-		 * case, during hotplug we know that all pages in the memory
-		 * block belong to the same node.
-		 */
-		if (system_state == SYSTEM_BOOTING) {
-			page_nid = get_nid_for_pfn(pfn);
-			if (page_nid < 0)
-				continue;
-			if (page_nid != nid)
-				continue;
-		}
+	int ret;
 
 	/*
 	 * If this memory block spans multiple nodes, we only indicate
@@ -812,11 +781,59 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
 	return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
 					&node_devices[nid]->dev.kobj,
 					kobject_name(&node_devices[nid]->dev.kobj));
 }
 
+/* register memory section under specified node if it spans that node */
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+					       void *arg)
+{
+	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
+	int nid = *(int *)arg;
+	unsigned long pfn;
+
+	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+		int page_nid;
+
+		/*
+		 * memory block could have several absent sections from start.
+		 * skip pfn range from absent section
+		 */
+		if (!pfn_in_present_section(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
+		/*
+		 * We need to check if page belongs to nid only at the boot
+		 * case because node's ranges can be interleaved.
+		 */
+		page_nid = get_nid_for_pfn(pfn);
+		if (page_nid < 0)
+			continue;
+		if (page_nid != nid)
+			continue;
+
+		return do_register_memory_block_under_node(nid, mem_blk);
+	}
+	/* mem section does not span the specified node */
+	return 0;
+}
+
+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+						 void *arg)
+{
+	int nid = *(int *)arg;
+
+	return do_register_memory_block_under_node(nid, mem_blk);
+}
+
 /*
  * Unregister a memory block device under the node it spans. Memory blocks
  * with multiple nodes cannot be offlined and therefore also never be removed.
@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 }
 
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+		      enum meminit_context context)
 {
+	walk_memory_blocks_func_t func;
+
+	if (context == MEMINIT_HOTPLUG)
+		func = register_mem_block_under_node_hotplug;
+	else
+		func = register_mem_block_under_node_early;
+
 	return walk_memory_blocks(PFN_PHYS(start_pfn),
 				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
-				  register_mem_sect_under_node);
+				  func);
 }
 
 #ifdef CONFIG_HUGETLBFS
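A hypothetical caller sketch for the new context argument: during boot the early variant must walk every pfn because node ranges may interleave, while for hotplug the whole block is known to sit on one node, so the pfn scan is skipped entirely.

	#include <linux/node.h>
	#include <linux/mmzone.h>

	static void example_register(int nid, unsigned long start_pfn,
				     unsigned long nr_pages, bool hotplug)
	{
		link_mem_sections(nid, start_pfn, start_pfn + nr_pages,
				  hotplug ? MEMINIT_HOTPLUG : MEMINIT_EARLY);
	}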
@@ -217,7 +217,7 @@ struct regmap_field {
 
 #ifdef CONFIG_DEBUG_FS
 extern void regmap_debugfs_initcall(void);
-extern void regmap_debugfs_init(struct regmap *map, const char *name);
+extern void regmap_debugfs_init(struct regmap *map);
 extern void regmap_debugfs_exit(struct regmap *map);
 
 static inline void regmap_debugfs_disable(struct regmap *map)
@@ -227,7 +227,7 @@ static inline void regmap_debugfs_disable(struct regmap *map)
 
 #else
 static inline void regmap_debugfs_initcall(void) { }
-static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
 static inline void regmap_debugfs_exit(struct regmap *map) { }
 static inline void regmap_debugfs_disable(struct regmap *map) { }
 #endif
@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len);
+		      const void *val, size_t val_len, bool noinc);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 
 	map->cache_bypass = true;
 
-	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
 	if (ret)
 		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
 			base, cur - map->reg_stride, ret);
@@ -17,7 +17,6 @@
 
 struct regmap_debugfs_node {
 	struct regmap *map;
-	const char *name;
 	struct list_head link;
 };
 
@@ -544,11 +543,12 @@ static const struct file_operations regmap_cache_bypass_fops = {
 	.write = regmap_cache_bypass_write_file,
 };
 
-void regmap_debugfs_init(struct regmap *map, const char *name)
+void regmap_debugfs_init(struct regmap *map)
 {
 	struct rb_node *next;
 	struct regmap_range_node *range_node;
 	const char *devname = "dummy";
+	const char *name = map->name;
 
 	/*
 	 * Userspace can initiate reads from the hardware over debugfs.
@@ -569,7 +569,6 @@ void regmap_debugfs_init(struct regmap *map)
 		if (!node)
 			return;
 		node->map = map;
-		node->name = name;
 		mutex_lock(&regmap_debugfs_early_lock);
 		list_add(&node->link, &regmap_debugfs_early_list);
 		mutex_unlock(&regmap_debugfs_early_lock);
@@ -679,7 +678,7 @@ void regmap_debugfs_initcall(void)
 
 	mutex_lock(&regmap_debugfs_early_lock);
 	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
-		regmap_debugfs_init(node->map, node->name);
+		regmap_debugfs_init(node->map);
 		list_del(&node->link);
 		kfree(node);
 	}
@@ -581,14 +581,34 @@ static void regmap_range_exit(struct regmap *map)
 	kfree(map->selector_work_buf);
 }
 
+static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
+{
+	if (config->name) {
+		const char *name = kstrdup_const(config->name, GFP_KERNEL);
+
+		if (!name)
+			return -ENOMEM;
+
+		kfree_const(map->name);
+		map->name = name;
+	}
+
+	return 0;
+}
+
 int regmap_attach_dev(struct device *dev, struct regmap *map,
 		      const struct regmap_config *config)
 {
 	struct regmap **m;
+	int ret;
 
 	map->dev = dev;
 
-	regmap_debugfs_init(map, config->name);
+	ret = regmap_set_name(map, config);
+	if (ret)
+		return ret;
+
+	regmap_debugfs_init(map);
 
 	/* Add a devres resource for dev_get_regmap() */
 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -687,13 +707,9 @@ struct regmap *__regmap_init(struct device *dev,
 		goto err;
 	}
 
-	if (config->name) {
-		map->name = kstrdup_const(config->name, GFP_KERNEL);
-		if (!map->name) {
-			ret = -ENOMEM;
-			goto err_map;
-		}
-	}
+	ret = regmap_set_name(map, config);
+	if (ret)
+		goto err_map;
 
 	if (config->disable_locking) {
 		map->lock = map->unlock = regmap_lock_unlock_none;
@@ -1137,7 +1153,7 @@ skip_format_initialization:
 		if (ret != 0)
 			goto err_regcache;
 	} else {
-		regmap_debugfs_init(map, config->name);
+		regmap_debugfs_init(map);
 	}
 
 	return map;
@@ -1297,6 +1313,8 @@ EXPORT_SYMBOL_GPL(regmap_field_free);
 */
 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 {
+	int ret;
+
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
 
@@ -1309,7 +1327,11 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 	map->readable_noinc_reg = config->readable_noinc_reg;
 	map->cache_type = config->cache_type;
 
-	regmap_debugfs_init(map, config->name);
+	ret = regmap_set_name(map, config);
+	if (ret)
+		return ret;
+
+	regmap_debugfs_init(map);
 
 	map->cache_bypass = false;
 	map->cache_only = false;
@@ -1464,7 +1486,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 }
 
 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
-				  const void *val, size_t val_len)
+				  const void *val, size_t val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1523,7 +1545,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 				win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write_impl(map, reg, val,
 						     win_residue *
-						     map->format.val_bytes);
+						     map->format.val_bytes, noinc);
 			if (ret != 0)
 				return ret;
 
@@ -1537,7 +1559,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 			win_residue = range->window_len - win_offset;
 		}
 
-		ret = _regmap_select_page(map, &reg, range, val_num);
+		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
 		if (ret != 0)
 			return ret;
 	}
@@ -1745,7 +1767,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				      map->work_buf +
 				      map->format.reg_bytes +
 				      map->format.pad_bytes,
-				      map->format.val_bytes);
+				      map->format.val_bytes,
+				      false);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1839,7 +1862,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len)
+		      const void *val, size_t val_len, bool noinc)
 {
 	size_t val_bytes = map->format.val_bytes;
 	size_t val_count = val_len / val_bytes;
@@ -1860,7 +1883,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write as many bytes as possible with chunk_size */
 	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
 		if (ret)
 			return ret;
 
@@ -1871,7 +1894,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write remaining bytes */
 	if (val_len)
-		ret = _regmap_raw_write_impl(map, reg, val, val_len);
+		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
 
 	return ret;
 }
@@ -1904,7 +1927,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->unlock(map->lock_arg);
 
@@ -1962,7 +1985,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
 			write_len = map->max_raw_write;
 		else
 			write_len = val_len;
-		ret = _regmap_raw_write(map, reg, val, write_len);
+		ret = _regmap_raw_write(map, reg, val, write_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + write_len;
@@ -2439,7 +2462,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
 	map->async = true;
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->async = false;
 
@@ -2450,7 +2473,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
 
 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
-			    unsigned int val_len)
+			    unsigned int val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	int ret;
@@ -2463,7 +2486,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
 		ret = _regmap_select_page(map, &reg, range,
-					  val_len / map->format.val_bytes);
+					  noinc ? 1 : val_len / map->format.val_bytes);
 		if (ret != 0)
 			return ret;
 	}
@@ -2501,7 +2524,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
 	if (ret == 0)
 		*val = map->format.parse_val(work_val);
 
@@ -2617,7 +2640,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read bytes that fit into whole chunks */
 		for (i = 0; i < chunk_count; i++) {
-			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
 			if (ret != 0)
 				goto out;
 
@@ -2628,7 +2651,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read remaining bytes */
 		if (val_len) {
-			ret = _regmap_raw_read(map, reg, val, val_len);
+			ret = _regmap_raw_read(map, reg, val, val_len, false);
 			if (ret != 0)
 				goto out;
 		}
@@ -2703,7 +2726,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
 			read_len = map->max_raw_read;
 		else
 			read_len = val_len;
-		ret = _regmap_raw_read(map, reg, val, read_len);
+		ret = _regmap_raw_read(map, reg, val, read_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + read_len;
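Usage sketch for the noinc plumbing above (hypothetical device, not a real driver): a FIFO data register is read repeatedly at one address, so when the map uses register paging, _regmap_select_page() must be told the access covers one register rather than val_len / val_bytes registers. Otherwise a long FIFO drain could be misread as spilling past the page window.

	#include <linux/regmap.h>

	#define MYDEV_FIFO_REG	0x24

	static int mydev_drain_fifo(struct regmap *map, u8 *buf, size_t len)
	{
		/* reads 'len' bytes from the same register address */
		return regmap_noinc_read(map, MYDEV_FIFO_REG, buf, len);
	}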
Some files were not shown because too many files have changed in this diff.