
Merge branch 'ib-chrome-platform-input-atmel-mx-ts-platform-removal' of git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform into next

Sync up with chrome-platform to bring in changes to Atmel touch
controller.
Dmitry Torokhov 2018-04-23 16:07:15 -07:00
commit 57eb13a9e8
771 changed files with 8718 additions and 5973 deletions


@ -1,110 +1,139 @@
What:		/sys/class/ata_...
Date:		August 2008
Contact:	Gwendal Grignou <gwendal@google.com>
Description:
		Provide a place in sysfs for storing the ATA topology of the
		system. This allows retrieving various information about ATA
		objects.

Files under /sys/class/ata_port
-------------------------------

For each port, a directory ataX is created where X is the ata_port_id of the
port. The device parent is the ata host device.

What:		/sys/class/ata_port/ataX/nr_pmp_links
What:		/sys/class/ata_port/ataX/idle_irq
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		nr_pmp_links:	(RO) If a SATA Port Multiplier (PM) is
				connected, the number of links behind it.

		idle_irq:	(RO) Number of IRQ received by the port while
				idle [some ata HBA only].

What:		/sys/class/ata_port/ataX/port_no
Date:		May, 2013
KernelVersion:	v3.11
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		(RO) Host local port number. While registering host controller,
		port numbers are tracked based upon number of ports available
		on the controller. This attribute is needed by udev for
		composing persistent links in /dev/disk/by-path.

Files under /sys/class/ata_link
-------------------------------

Behind each port, there is an ata_link. If there is a SATA PM in the topology,
15 ata_link objects are created.

If a link is behind a port, the directory name is linkX, where X is ata_port_id
of the port. If a link is behind a PM, its name is linkX.Y where X is
ata_port_id of the parent port and Y the PM port.

What:		/sys/class/ata_link/linkX[.Y]/hw_sata_spd_limit
What:		/sys/class/ata_link/linkX[.Y]/sata_spd_limit
What:		/sys/class/ata_link/linkX[.Y]/sata_spd
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		hw_sata_spd_limit:	(RO) Maximum speed supported by the
					connected SATA device.

		sata_spd_limit:		(RO) Maximum speed imposed by libata.

		sata_spd:		(RO) Current speed of the link,
					e.g. 1.5 or 3 Gbps.

Files under /sys/class/ata_device
---------------------------------

Behind each link, up to two ata devices are created.
The name of the directory is devX[.Y].Z where:
- X is ata_port_id of the port where the device is connected,
- Y the port of the PM if any, and
- Z the device id: for PATA, there are usually 2 devices [0,1], only 1 for
  SATA.

What:		/sys/class/ata_device/devX[.Y].Z/spdn_cnt
What:		/sys/class/ata_device/devX[.Y].Z/gscr
What:		/sys/class/ata_device/devX[.Y].Z/ering
What:		/sys/class/ata_device/devX[.Y].Z/id
What:		/sys/class/ata_device/devX[.Y].Z/pio_mode
What:		/sys/class/ata_device/devX[.Y].Z/xfer_mode
What:		/sys/class/ata_device/devX[.Y].Z/dma_mode
What:		/sys/class/ata_device/devX[.Y].Z/class
Date:		May, 2010
KernelVersion:	v2.6.37
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		spdn_cnt:	(RO) Number of times libata decided to lower the
				speed of link due to errors.

		gscr:		(RO) Cached result of the dump of PM GSCR
				register. Valid registers are:

				0:	SATA_PMP_GSCR_PROD_ID,
				1:	SATA_PMP_GSCR_REV,
				2:	SATA_PMP_GSCR_PORT_INFO,
				32:	SATA_PMP_GSCR_ERROR,
				33:	SATA_PMP_GSCR_ERROR_EN,
				64:	SATA_PMP_GSCR_FEAT,
				96:	SATA_PMP_GSCR_FEAT_EN,
				130:	SATA_PMP_GSCR_SII_GPIO

				Only valid if the device is a PM.

		ering:		(RO) Formatted output of the error ring of the
				device.

		id:		(RO) Cached result of IDENTIFY command, as
				described in ATA8 7.16 and 7.17. Only valid if
				the device is not a PM.

		pio_mode:	(RO) Transfer modes supported by the device when
				in PIO mode. Mostly used by PATA device.

		xfer_mode:	(RO) Current transfer mode.

		dma_mode:	(RO) Transfer modes supported by the device when
				in DMA mode. Mostly used by PATA device.

		class:		(RO) Device class. Can be "ata" for disk,
				"atapi" for packet device, "pmp" for PM, or
				"none" if no device was found behind the link.

What:		/sys/class/ata_device/devX[.Y].Z/trim
Date:		May, 2015
KernelVersion:	v4.10
Contact:	Gwendal Grignou <gwendal@chromium.org>
Description:
		(RO) Shows the DSM TRIM mode currently used by the device.
		Valid values are:

		unsupported:		Drive does not support DSM TRIM
		unqueued:		Drive supports unqueued DSM TRIM only
		queued:			Drive supports queued DSM TRIM
		forced_unqueued:	Drive's queued DSM support is known to
					be buggy and only unqueued TRIM commands
					are sent
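
For instance, the trim attribute reads like any other sysfs file. A minimal
user-space sketch (the dev1.0 path is an assumed example, not a fixed name):

    #include <stdio.h>

    /* Print the DSM TRIM mode of an assumed device dev1.0; adjust the
     * path to match the ataX/devX[.Y].Z topology described above. */
    int main(void)
    {
        char mode[32];
        FILE *f = fopen("/sys/class/ata_device/dev1.0/trim", "r");

        if (!f || !fgets(mode, sizeof(mode), f)) {
            perror("trim");
            return 1;
        }
        printf("DSM TRIM mode: %s", mode);
        fclose(f);
        return 0;
    }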


@ -0,0 +1,58 @@
What: /sys/block/*/device/sw_activity
Date: Jun, 2008
KernelVersion: v2.6.27
Contact: linux-ide@vger.kernel.org
Description:
(RW) Used by drivers which support software controlled activity
LEDs.
It has the following valid values:
0 OFF - the LED is not activated on activity
1 BLINK_ON - the LED blinks on every 10ms when activity is
detected.
2 BLINK_OFF - the LED is on when idle, and blinks off
every 10ms when activity is detected.
Note that the user must turn sw_activity OFF if they wish to
control the activity LED via the em_message file.
What: /sys/block/*/device/unload_heads
Date: Sep, 2008
KernelVersion: v2.6.28
Contact: linux-ide@vger.kernel.org
Description:
(RW) Hard disk shock protection
Writing an integer value to this file will take the heads of the
respective drive off the platter and block all I/O operations
for the specified number of milliseconds.
- If the device does not support the unload heads feature,
access is denied with -EOPNOTSUPP.
- The maximal value accepted for a timeout is 30000
milliseconds.
- A previously set timeout can be cancelled and disk can resume
normal operation immediately by specifying a timeout of 0.
- Some hard drives only comply with an earlier version of the
ATA standard, but support the unload feature nonetheless.
There is no safe way Linux can detect these devices, so this
is not enabled by default. If it is known that your device
does support the unload feature, then you can tell the kernel
to enable it by writing -1. It can be disabled again by
writing -2.
- Values below -2 are rejected with -EINVAL
For more information, see
Documentation/laptops/disk-shock-protection.txt
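
As a sketch of the write side, the following parks the heads of an assumed
disk for two seconds (device path and timeout are arbitrary choices, error
handling abbreviated):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Park the heads of sda for 2000 ms; writing 0 would cancel the
     * timeout again. An EOPNOTSUPP error means the drive does not
     * support the unload feature (see the -1/-2 override above). */
    int main(void)
    {
        FILE *f = fopen("/sys/block/sda/device/unload_heads", "w");

        if (!f) {
            perror("unload_heads");
            return 1;
        }
        if (fprintf(f, "2000") < 0 || fclose(f) != 0) {
            fprintf(stderr, "park failed: %s\n", strerror(errno));
            return 1;
        }
        return 0;
    }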
What: /sys/block/*/device/ncq_prio_enable
Date: Oct, 2016
KernelVersion: v4.10
Contact: linux-ide@vger.kernel.org
Description:
(RW) Write to the file to turn on or off the SATA NCQ (native
command queueing) priority support. By default this feature is
turned off.


@ -27,3 +27,92 @@ Description: This file contains the current status of the "SSD Smart Path"
the direct i/o path to physical devices. This setting is
controller wide, affecting all configured logical drives on the
controller. This file is readable and writable.
What: /sys/class/scsi_host/hostX/link_power_management_policy
Date: Oct, 2007
KernelVersion: v2.6.24
Contact: linux-ide@vger.kernel.org
Description:
(RW) This parameter allows the user to read and set the link
(interface) power management.
There are four possible options:
min_power: Tell the controller to try to make the link use the
least possible power when possible. This may sacrifice some
performance due to increased latency when coming out of lower
power states.
max_performance: Generally, this means no power management.
Tell the controller to have performance be a priority over power
management.
medium_power: Tell the controller to enter a lower power state
when possible, but do not enter the lowest power state, thus
improving latency over min_power setting.
med_power_with_dipm: Identical to the existing medium_power
setting except that it enables dipm (device initiated power
management) on top, which makes it match the Windows IRST (Intel
Rapid Storage Technology) driver settings. This setting is also
close to min_power, except that:
a) It does not use host-initiated slumber mode, but it does
allow device-initiated slumber
b) It does not enable low power device sleep mode (DevSlp).
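
A small sketch of switching one host to this policy and reading it back
(host number and policy choice are assumptions; the write requires root):

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/class/scsi_host/host0/link_power_management_policy";
        char cur[64];
        FILE *f = fopen(path, "w");

        if (!f || fprintf(f, "med_power_with_dipm") < 0 || fclose(f)) {
            perror("set policy");
            return 1;
        }
        f = fopen(path, "r");
        if (f && fgets(cur, sizeof(cur), f))
            printf("policy now: %s", cur);
        if (f)
            fclose(f);
        return 0;
    }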
What: /sys/class/scsi_host/hostX/em_message
What: /sys/class/scsi_host/hostX/em_message_type
Date: Jun, 2008
KernelVersion: v2.6.27
Contact: linux-ide@vger.kernel.org
Description:
em_message: (RW) Enclosure management support. For the LED
protocol, writes and reads correspond to the LED message format
as defined in the AHCI spec.
The user must turn sw_activity (under /sys/block/*/device/) OFF
if they wish to control the activity LED via the em_message
file.
em_message_type: (RO) Displays the current enclosure management
protocol that is being used by the driver (e.g. LED, SAF-TE,
SES-2, SGPIO etc).
What: /sys/class/scsi_host/hostX/ahci_port_cmd
What: /sys/class/scsi_host/hostX/ahci_host_caps
What: /sys/class/scsi_host/hostX/ahci_host_cap2
Date: Mar, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
[to be documented]
What: /sys/class/scsi_host/hostX/ahci_host_version
Date: Mar, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
(RO) Display the version of the AHCI spec implemented by the
host.
What: /sys/class/scsi_host/hostX/em_buffer
Date: Apr, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
(RW) Allows access to AHCI EM (enclosure management) buffer
directly if the host supports EM.
For example, the AHCI driver supports SGPIO EM messages but the
SATA/AHCI specs do not define the SGPIO message format of the EM
buffer. Different hardware (HW) vendors may have different
definitions. With the em_buffer attribute, this issue can be
solved by allowing HW vendors to provide userland drivers and
tools for their SGPIO initiators.
What: /sys/class/scsi_host/hostX/em_message_supported
Date: Oct, 2009
KernelVersion: v2.6.39
Contact: linux-ide@vger.kernel.org
Description:
(RO) Displays supported enclosure management message types.


@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
Associate an event fd to an AFU interrupt so that the user process
can be notified when the AFU sends an interrupt.
OCXL_IOCTL_GET_METADATA:
Obtains configuration information from the card, such as the size of
the MMIO areas, the AFU version, and the PASID for the current context.
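
A hedged user-space sketch of the call (the device path, and the assumption
that the uapi header of this era exposes struct ocxl_ioctl_metadata with a
leading version field, are assumptions, not quotes from this commit):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <misc/ocxl.h>  /* uapi header with OCXL_IOCTL_GET_METADATA */

    int main(void)
    {
        struct ocxl_ioctl_metadata md = { 0 };
        int fd = open("/dev/ocxl/ocxl-afu0.0", O_RDWR);  /* assumed path */

        if (fd < 0 || ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) < 0) {
            perror("ocxl metadata");
            return 1;
        }
        /* md now holds the MMIO area sizes, AFU version and PASID
         * described above. */
        printf("metadata struct version: %u\n", md.version);
        close(fd);
        return 0;
    }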
mmap
----


@ -16,6 +16,7 @@ Required properties:
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
- #sound-dai-cells: should be 0.
Required properties for Exynos 4210, 4212, 5420 and 5433:
- clocks: list of clock IDs from SoC clock driver.


@ -11,7 +11,11 @@ Required properties:
interrupts.
Optional properties:
- clocks: Optional reference to the clocks used by the XOR engine.
- clock-names: mandatory if there is a second clock, in this case the
  name must be "core" for the first clock and "reg" for the second
  one
Example:


@ -3,11 +3,11 @@ Device-Tree bindings for sigma delta modulator
Required properties:
- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be used
  as a generic SD modulator if the modulator is not specified in the
  compatible list.
- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".
Example node:
ads1202: adc@0 {
compatible = "sd-modulator";
	#io-channel-cells = <0>;
};


@ -50,14 +50,15 @@ Example:
compatible = "marvell,mv88e6085";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
			mdio {
				#address-cells = <1>;
				#size-cells = <0>;

				switch1phy0: switch1phy0@0 {
					reg = <0>;
					interrupt-parent = <&switch0>;
					interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
				};
			};
		};
	};
};
@ -74,23 +75,24 @@ Example:
compatible = "marvell,mv88e6390";
reg = <0>;
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
			mdio {
				#address-cells = <1>;
				#size-cells = <0>;

				switch1phy0: switch1phy0@0 {
					reg = <0>;
					interrupt-parent = <&switch0>;
					interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
				};
			};

			mdio1 {
				compatible = "marvell,mv88e6xxx-mdio-external";
				#address-cells = <1>;
				#size-cells = <0>;

				switch1phy9: switch1phy0@9 {
					reg = <9>;
				};
			};
		};
	};
};


@ -18,6 +18,7 @@ Required properties:
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
- "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- "renesas,etheravb-r8a77980" for the R8A77980 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- "renesas,etheravb-rcar-gen3" as a fallback for the above
R-Car Gen3 devices.
@ -26,7 +27,11 @@ Required properties:
SoC-specific version corresponding to the platform first followed by
the generic version.
- reg: Offset and length of (1) the register block and (2) the stream buffer.
The region for the register block is mandatory.
The region for the stream buffer is optional, as it is only present on
R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
and M3-N (R8A77965).
- interrupts: A list of interrupt-specifiers, one for each entry in
interrupt-names.
If interrupt-names is not present, an interrupt specifier


@ -19,7 +19,7 @@ Required properties:
configured in FS mode;
- "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
configured in HS mode;
- "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
configured in HS mode;
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt


@ -4,6 +4,7 @@ Required properties:
- compatible: Must contain one of the following:
- "renesas,r8a7795-usb3-peri"
- "renesas,r8a7796-usb3-peri"
- "renesas,r8a77965-usb3-peri"
- "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
device


@ -12,6 +12,7 @@ Required properties:
- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices


@ -13,6 +13,7 @@ Required properties:
- "renesas,xhci-r8a7793" for r8a7793 SoC
- "renesas,xhci-r8a7795" for r8a7795 SoC
- "renesas,xhci-r8a7796" for r8a7796 SoC
- "renesas,xhci-r8a77965" for r8a77965 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device


@ -111,7 +111,7 @@ TROUBLESHOOTING SERIAL CONSOLE PROBLEMS
- If you don't have an HCDP, the kernel doesn't know where
your console lives until the driver discovers serial
devices. Use "console=uart,io,0x3f8" (or appropriate
address for your machine).
Kernel and init script output works fine, but no "login:" prompt:


@ -20,8 +20,8 @@ TCP Segmentation Offload
TCP segmentation allows a device to segment a single frame into multiple
frames with a data payload size specified in skb_shinfo()->gso_size.
When TCP segmentation is requested, the bit for either SKB_GSO_TCPV4 or
SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
skb_shinfo()->gso_size should be set to a non-zero value.
TCP segmentation is dependent on support for the use of partial checksum
@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
Therefore, any code in the core networking stack must be aware of the
possibility that gso_size will be GSO_BY_FRAGS and handle that case
appropriately.
There are some helpers to make this easier:
- skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
an skb is an SCTP GSO skb.
- For size checks, the skb_gso_validate_*_len family of helpers correctly
considers GSO_BY_FRAGS.
- For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
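
As an illustration (a sketch, not code from this tree), a driver-side length
check that tolerates SCTP GSO skbs could be written with these helpers:

    #include <linux/skbuff.h>

    /* Return true if skb fits mtu, including SCTP GSO skbs whose
     * gso_size is the magic GSO_BY_FRAGS value; the validate helper
     * walks the segments in that case instead of trusting gso_size. */
    static bool frame_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
        if (skb_is_gso(skb))
            return skb_gso_validate_network_len(skb, mtu);

        return skb->len <= mtu;
    }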


@ -36,8 +36,7 @@ import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from sphinx.ext.autodoc import AutodocReporter
__version__ = '1.0'


@ -2394,7 +2394,6 @@ T: git git://github.com/ndyer/linux.git
S: Maintained
F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
F: include/linux/platform_data/atmel_mxt_ts.h
ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
@ -9925,6 +9924,13 @@ F: Documentation/ABI/stable/sysfs-bus-nvmem
F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
NXP SGTL5000 DRIVER
M: Fabio Estevam <fabio.estevam@nxp.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/sgtl5000.txt
F: sound/soc/codecs/sgtl5000*
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Supported
@ -10327,7 +10333,7 @@ F: drivers/oprofile/
F: include/linux/oprofile.h
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mark@fasheh.com>
M: Joel Becker <jlbec@evilplan.org>
L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W: http://ocfs2.wiki.kernel.org
@ -10837,6 +10843,7 @@ F: drivers/platform/x86/peaq-wmi.c
PER-CPU MEMORY ALLOCATOR
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
M: Dennis Zhou <dennisszhou@gmail.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
S: Maintained
F: include/linux/percpu*.h
@ -12107,6 +12114,7 @@ M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung/
F: Documentation/devicetree/bindings/sound/samsung*
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>


@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
# clang sets -fmerge-all-constants by default as optimization, but this
# is non-conforming behavior for C and in fact breaks the kernel, so we
# need to disable it here generally.
KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)
# for gcc -fno-merge-all-constants disables everything, but it is fine
# to have actual conforming behavior enabled.
KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
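
The conformance problem the comment refers to can be shown in a few lines of
standalone C (an illustration, not kernel code): the standard requires
distinct objects to have distinct addresses, which merging all constants
breaks.

    static const char a[] = "key";
    static const char b[] = "key";

    /* Must return 1 per the C standard; with -fmerge-all-constants the
     * compiler may fold a and b into one object and return 0. */
    int addresses_are_distinct(void)
    {
        return a != b;
    }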


@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO
config MACH_DNS323
bool "D-Link DNS-323"
select GENERIC_NET_UTILS
select I2C_BOARDINFO if I2C
help
Say 'Y' here if you want your kernel to support the
@ -66,7 +65,6 @@ config MACH_DNS323
config MACH_TS209
bool "QNAP TS-109/TS-209"
select GENERIC_NET_UTILS
help
Say 'Y' here if you want your kernel to support the
QNAP TS-109/TS-209 platform.
@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL
config MACH_TS409
bool "QNAP TS-409"
select GENERIC_NET_UTILS
help
Say 'Y' here if you want your kernel to support the
QNAP TS-409 platform.


@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these
* functions be kept somewhere?
*/
static int __init dns323_parse_hex_nibble(char n)
{
if (n >= '0' && n <= '9')
return n - '0';
if (n >= 'A' && n <= 'F')
return n - 'A' + 10;
if (n >= 'a' && n <= 'f')
return n - 'a' + 10;
return -1;
}
static int __init dns323_parse_hex_byte(const char *b)
{
int hi;
int lo;
hi = dns323_parse_hex_nibble(b[0]);
lo = dns323_parse_hex_nibble(b[1]);
if (hi < 0 || lo < 0)
return -1;
return (hi << 4) | lo;
}
static int __init dns323_read_mac_addr(void)
{
	u_int8_t addr[6];
	int i;
	char *mac_page;
/* MAC address is stored as a regular ol' string in /dev/mtdblock4
* (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80).
@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void)
if (!mac_page)
return -ENOMEM;
/* Sanity check the string we're looking at */
for (i = 0; i < 5; i++) {
if (*(mac_page + (i * 3) + 2) != ':') {
goto error_fail;
}
}
for (i = 0; i < 6; i++) {
int byte;
byte = dns323_parse_hex_byte(mac_page + (i * 3));
if (byte < 0) {
goto error_fail;
}
addr[i] = byte;
}
iounmap(mac_page);
printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
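
For illustration, the same parsing logic compiles as plain user-space C
(a standalone re-implementation for demonstration, not the kernel build):

    #include <stdio.h>

    static int parse_hex_nibble(char n)
    {
        if (n >= '0' && n <= '9')
            return n - '0';
        if (n >= 'A' && n <= 'F')
            return n - 'A' + 10;
        if (n >= 'a' && n <= 'f')
            return n - 'a' + 10;
        return -1;
    }

    static int parse_hex_byte(const char *b)
    {
        int hi = parse_hex_nibble(b[0]);
        int lo = parse_hex_nibble(b[1]);

        return (hi < 0 || lo < 0) ? -1 : (hi << 4) | lo;
    }

    int main(void)
    {
        const char *s = "00:11:22:33:44:55";
        unsigned char addr[6];
        int i;

        for (i = 0; i < 6; i++) {
            int byte = parse_hex_byte(s + i * 3);

            if (byte < 0 || (i < 5 && s[i * 3 + 2] != ':'))
                return 1;
            addr[i] = byte;
        }
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
    }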


@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
static int __init qnap_tsx09_parse_hex_nibble(char n)
{
if (n >= '0' && n <= '9')
return n - '0';
if (n >= 'A' && n <= 'F')
return n - 'A' + 10;
if (n >= 'a' && n <= 'f')
return n - 'a' + 10;
return -1;
}
static int __init qnap_tsx09_parse_hex_byte(const char *b)
{
int hi;
int lo;
hi = qnap_tsx09_parse_hex_nibble(b[0]);
lo = qnap_tsx09_parse_hex_nibble(b[1]);
if (hi < 0 || lo < 0)
return -1;
return (hi << 4) | lo;
}
static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
{
u_int8_t addr[6];
int i;
for (i = 0; i < 6; i++) {
int byte;
/*
* Enforce "xx:xx:xx:xx:xx:xx\n" format.
*/
if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
return -1;
byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
if (byte < 0)
return -1;
addr[i] = byte;
}
printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
unsigned long addr;
for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
	char *nor_page;
int ret = 0;
nor_page = ioremap(addr, 1024);
if (nor_page != NULL) {
	ret = qnap_tsx09_check_mac_addr(nor_page);
iounmap(nor_page);
}


@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	if ((int)res.a0 < 0)
return 0;
cb = call_hvc_arch_workaround_1;
smccc_start = __smccc_workaround_1_hvc_start;
@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	if ((int)res.a0 < 0)
return 0;
cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start;


@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
{
int ret = 0;
vcpu_load(vcpu);
trace_kvm_set_guest_debug(vcpu, dbg->control);
if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
}
out:
vcpu_put(vcpu);
return ret;
}


@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
if (old == 0 || new == 0)
@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
if ((old | new) & PTE_CONT)
return false;
	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;
return ((old ^ new) & ~mask) == 0;
}
@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
pmd_clear(pmdp);
return 1;
}
int pud_free_pmd_page(pud_t *pud)
{
return pud_none(*pud);
}
int pmd_free_pte_page(pmd_t *pmd)
{
return pmd_none(*pmd);
}


@ -2,7 +2,6 @@
#ifndef __H8300_BYTEORDER_H__
#define __H8300_BYTEORDER_H__
#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#include <linux/byteorder/big_endian.h>
#endif


@ -66,38 +66,35 @@ ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})
@ -105,11 +102,7 @@ ATOMIC_OPS(sub, -)
#define atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})
@ -170,11 +163,7 @@ ATOMIC64_OPS(sub, -)
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
@ -182,11 +171,7 @@ ATOMIC64_OPS(sub, -)
#define atomic64_sub_return(i,v) \
({ \
long __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
@ -194,11 +179,7 @@ ATOMIC64_OPS(sub, -)
#define atomic64_fetch_add(i,v) \
({ \
long __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
@ -206,11 +187,7 @@ ATOMIC64_OPS(sub, -)
#define atomic64_fetch_sub(i,v) \
({ \
long __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})
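
The dispatch idiom is easier to see in a standalone sketch (the function
names below are illustrative stand-ins, not kernel APIs): __builtin_constant_p()
selects the fetchadd fast path only for immediates the instruction can encode.

    #include <stdio.h>

    static int generic_add(int i, int *v) { return *v += i; }
    static int fast_fetch_and_add(int i, int *v) { return *v += i; }

    #define add_return(i, v)                                         \
    ({                                                               \
        int __i = (i);                                               \
        (__builtin_constant_p(i) &&                                  \
         ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||           \
          (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16))        \
            ? fast_fetch_and_add(__i, v)                             \
            : generic_add(__i, v);                                   \
    })

    int main(void)
    {
        int v = 0, n = 3;

        printf("%d\n", add_return(4, &v)); /* constant in set: fast path */
        printf("%d\n", add_return(n, &v)); /* runtime value: generic path */
        return 0;
    }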


@ -117,7 +117,7 @@ store_call_start(struct device *dev, struct device_attribute *attr,
#ifdef ERR_INJ_DEBUG
printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
#endif
return size;
@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
u64 virt_addr=simple_strtoull(buf, NULL, 16);
int ret;
	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);


@ -16,7 +16,7 @@ import re
import sys
if len(sys.argv) != 2:
    print("Usage: %s FILE" % sys.argv[0])
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
@ -29,7 +29,7 @@ def check_func (func, slots, rlen_sum):
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
    print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
return
num_funcs = 0
@ -43,23 +43,23 @@ for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
check_func(func, slots, rlen_sum)
func = m.group(1)
        start = int(m.group(2), 16)
        end = int(m.group(3), 16)
slots = 3 * (end - start) / 16
        rlen_sum = 0
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
            rlen_sum += int(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print("No errors detected in %u functions." % num_funcs)
else:
if num_errors > 1:
err="errors"
else:
err="error"
    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
sys.exit(1)


@ -24,6 +24,7 @@ config MICROBLAZE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select NO_BOOTMEM
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE


@ -8,7 +8,6 @@ menu "Platform options"
config OPT_LIB_FUNCTION
bool "Optimalized lib function"
depends on CPU_LITTLE_ENDIAN
default y
help
Allows turn on optimalized library function (memcpy and memmove).
@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
config OPT_LIB_ASM
bool "Optimalized lib function ASM"
depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
depends on CPU_BIG_ENDIAN
default n
help
Allows turn on optimalized library function (memcpy and memmove).


@ -44,7 +44,6 @@ void machine_shutdown(void);
void machine_halt(void);
void machine_power_off(void);
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
# endif /* __ASSEMBLY__ */


@ -29,10 +29,6 @@
* between mem locations with size of xfer spec'd in bytes
*/
#ifdef __MICROBLAZEEL__
#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
#endif
#include <linux/linkage.h>
.text
.globl memcpy


@ -32,9 +32,6 @@ int mem_init_done;
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#else
static int init_bootmem_done;
#endif /* CONFIG_MMU */
char *klimit = _end;
@ -117,7 +114,6 @@ static void __init paging_init(void)
void __init setup_memory(void)
{
unsigned long map_size;
struct memblock_region *reg;
#ifndef CONFIG_MMU
@ -174,17 +170,6 @@ void __init setup_memory(void)
pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
/*
* Find an area to use for the bootmem bitmap.
* We look for the first area which is at least
* 128kB in length (128kB is enough for a bitmap
* for 4GB of memory, using 4kB pages), plus 1 page
* (in case the address isn't page-aligned).
*/
map_size = init_bootmem_node(NODE_DATA(0),
PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
/* Add active regions with valid PFNs */
for_each_memblock(memory, reg) {
unsigned long start_pfn, end_pfn;
@ -196,32 +181,9 @@ void __init setup_memory(void)
&memblock.memory, 0);
}
/* free bootmem is whole main memory */
free_bootmem_with_active_regions(0, max_low_pfn);
/* reserve allocate blocks */
for_each_memblock(reserved, reg) {
unsigned long top = reg->base + reg->size - 1;
pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
(u32) reg->base, (u32) reg->size, top,
memory_start + lowmem_size - 1);
if (top <= (memory_start + lowmem_size - 1)) {
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
} else if (reg->base < (memory_start + lowmem_size - 1)) {
unsigned long trunc_size = memory_start + lowmem_size -
reg->base;
reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
}
}
/* XXX need to clip this if using highmem? */
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_MMU
init_bootmem_done = 1;
#endif
paging_init();
}
@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	/*
	 * Mem start + kernel_tlb -> here is limit
	 * because of mem mapping from head.S
	 */
	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				memory_start + kernel_tlb));
}
#endif /* CONFIG_MMU */
void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
{
if (mem_init_done)
return kmalloc(size, mask);
else
return alloc_bootmem(size);
}
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;


@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
}
board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
if (!board_data)
goto error;
ath25_board.config = (struct ath25_boarddata *)board_data;
memcpy_fromio(board_data, bcfg, 0x100);
if (broken_boarddata) {


@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
}
host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
if (!host_data)
return -ENOMEM;
raw_spin_lock_init(&host_data->lock);
addr = of_get_address(ciu_node, 0, NULL, NULL);


@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
return;
}
	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}


@ -13,6 +13,8 @@ choice
config SOC_AMAZON_SE
bool "Amazon SE"
select SOC_TYPE_XWAY
select MFD_SYSCON
select MFD_CORE
config SOC_XWAY
bool "XWAY"


@ -549,9 +549,9 @@ void __init ltq_soc_init(void)
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
ltq_ar9_fpi_hz(), CLOCK_250M);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
	clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
	clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
@ -560,7 +560,7 @@ void __init ltq_soc_init(void)
} else {
clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
	clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);


@ -7,6 +7,8 @@ choice
config LEMOTE_FULOONG2E
bool "Lemote Fuloong(2e) mini-PC"
select ARCH_SPARSEMEM_ENABLE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select CEVT_R4K
select CSRC_R4K
select SYS_HAS_CPU_LOONGSON2E
@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
config LEMOTE_MACH2F
bool "Lemote Loongson 2F family machines"
select ARCH_SPARSEMEM_ENABLE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BOARD_SCACHE
select BOOT_ELF32
select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
@ -62,6 +66,8 @@ config LEMOTE_MACH2F
config LOONGSON_MACH3X
bool "Generic Loongson 3 family machines"
select ARCH_SPARSEMEM_ENABLE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select BOOT_ELF32
select BOARD_SCACHE


@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
u32 n1;
u32 rev;
/* Early detection of CMP support */
mips_cm_probe();
mips_cpc_probe();
if (mips_cps_numiocu(0)) {
/*
* mips_cm_probe() wipes out bootloader
* config for CM regions and we have to configure them
* again. This SoC cannot talk to pamlbus devices
* witout proper iocu region set up.
*
* FIXME: it would be better to do this with values
* from DT, but we need this very early because
* without this we cannot talk to pretty much anything
* including serial.
*/
write_gcr_reg0_base(MT7621_PALMBUS_BASE);
write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
CM_GCR_REGn_MASK_CMTGT_IOCU0);
__sync();
}
n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
rt2880_pinmux_data = mt7621_pinmux_data;
if (!register_cps_smp_ops())
return;


@ -96,16 +96,9 @@ static void ralink_restart(char *command)
unreachable();
}
static void ralink_halt(void)
{
local_irq_disable();
unreachable();
}
static int __init mips_reboot_setup(void)
{
_machine_restart = ralink_restart;
_machine_halt = ralink_halt;
return 0;
}


@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
rp3440, etc. So, avoid it if the mm isn't too big. */
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
flush_cache_all();
return;
}
@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
if (unlikely(mm->context))
flush_tlb_page(vma, addr);
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
pgd_t *pgd;
unsigned long addr;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
flush_cache_all();
return;
}
if (vma->vm_mm->context == mfsp(3)) {
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
flush_tlb_range(vma, start, end);
return;
}
pgd = vma->vm_mm->pgd;
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
unsigned long pfn;
pte_t *ptep = get_ptep(pgd, addr);
if (!ptep)
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn)) {
if (unlikely(vma->vm_mm->context))
flush_tlb_page(vma, addr);
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context))
			flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}


@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \


@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.mmu = 0,
.hash_ext = 0,
.radix_ext = 0,
.byte22 = 0,
},
/* option vector 6: IBM PAPR hints */


@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
kmem_cache_free(kvm_pte_cache, ptep);
}
/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_PTE);
}
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
unsigned int level, unsigned long mmu_seq)
{
@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
else
new_pmd = pmd_alloc_one(kvm->mm, gpa);
	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
new_ptep = kvmppc_pte_alloc();
/* Check if we might have been invalidated; let the guest retry if so */
@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
new_pmd = NULL;
}
pmd = pmd_offset(pud, gpa);
if (pmd_is_leaf(*pmd)) {
unsigned long lgpa = gpa & PMD_MASK;
/*
* If we raced with another CPU which has just put
* a 2MB pte in after we saw a pte page, try again.
*/
if (level == 0 && !new_ptep) {
ret = -EAGAIN;
goto out_unlock;
}
/* Valid 2MB page here already, remove it */
old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
~0UL, 0, lgpa, PMD_SHIFT);
kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
if (old & _PAGE_DIRTY) {
unsigned long gfn = lgpa >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
memslot = gfn_to_memslot(kvm, gfn);
if (memslot && memslot->dirty_bitmap)
kvmppc_update_dirty_map(memslot,
gfn, PMD_SIZE);
}
} else if (level == 1 && !pmd_none(*pmd)) {
/*
* There's a page table page here, but we wanted
* to install a large page. Tell the caller and let
@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
} else {
page = pages[0];
pfn = page_to_pfn(page);
		if (PageCompound(page)) {
			pte_size <<= compound_order(compound_head(page));
/* See if we can insert a 2MB large-page PTE here */
if (pte_size >= PMD_SIZE &&
			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
			    (hva & (PMD_SIZE - PAGE_SIZE))) {
level = 1;
pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
}
}
/* See if we can provide write access */
if (writing) {
pgflags |= _PAGE_WRITE;
} else {
local_irq_save(flags);
ptep = find_current_mm_pte(current->mm->pgd,
hva, NULL, NULL);
		if (ptep && pte_write(*ptep))
pgflags |= _PAGE_WRITE;
local_irq_restore(flags);
}
@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte = pfn_pte(pfn, __pgprot(pgflags));
ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
}
	if (page) {
if (!ret && (pgflags & _PAGE_WRITE))
set_page_dirty_lock(page);
put_page(page);
}
if (ret == 0 || ret == -EAGAIN)
ret = RESUME_GUEST;
return ret;
}
@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
continue;
pmd = pmd_offset(pud, 0);
for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
			if (pmd_is_leaf(*pmd)) {
pmd_clear(pmd);
continue;
}


@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
trace_hardirqs_on();
	guest_enter_irqoff();
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
trace_hardirqs_off();
set_irq_happened(trap);
@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_set_host_core(pcpu);
local_irq_enable();
guest_exit();
/* Let secondaries go back to the offline loop */
for (i = 0; i < controlled_threads; ++i) {
@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
goto up_out;
psize = vma_kernel_pagesize(vma);
up_read(&current->mm->mmap_sem);
/* We can handle 4k, 64k or 16M pages in the VRMA */
err = -EINVAL;
if (psize >= 0x1000000)
psize = 0x1000000;
else if (psize >= 0x10000)
psize = 0x10000;
else
psize = 0x1000;
porder = __ilog2(psize);
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |


@ -320,7 +320,6 @@ kvm_novcpu_exit:
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
secondary_too_late:
li r12, 0
stw r12, STACK_SLOT_TRAP(r1)
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)
@ -1558,12 +1558,12 @@ mc_cont:
3: stw r5,VCPU_SLB_MAX(r9)
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
ld r9, HSTATE_KVM_VCPU(r13)
lwz r12, VCPU_TRAP(r9)
/* Stop others sending VCPU interrupts to this physical CPU */
li r0, -1
@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
* Here STACK_SLOT_TRAP(r1) contains the trap number.
*/
kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */
@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* If HMI, call kvmppc_realmode_hmi_handler() */
lwz r12, STACK_SLOT_TRAP(r1)
cmpwi r12, BOOK3S_INTERRUPT_HMI
bne 27f
bl kvmppc_realmode_hmi_handler
nop
cmpdi r3, 0
li r12, BOOK3S_INTERRUPT_HMI
/*
* At this point kvmppc_realmode_hmi_handler may have resync-ed
* the TB, and if it has, we must not subtract the guest timebase
@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
lwz r8, KVM_SPLIT_DO_RESTORE(r3)
cmpwi r8, 0
beq 47f
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_p9_restore_lpcr
nop
lwz r12, STACK_SLOT_TRAP(r1)
b 48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
ld r0, SFS+PPC_LR_STKOFF(r1)
addi r1, r1, SFS
mtlr r0


@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;
while (vcpu->arch.mmio_vmx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_sigset_deactivate(vcpu);
#ifdef CONFIG_ALTIVEC
out:
#endif
vcpu_put(vcpu);
return r;
}


@ -240,6 +240,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* goto out;
*/
PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
PPC_BCC(COND_GE, out);


@ -63,6 +63,7 @@ static inline int init_new_context(struct task_struct *tsk,
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
/* pgd_alloc() did not account this pmd */
mm_inc_nr_pmds(mm);
mm_inc_nr_puds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;


@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@ -230,7 +231,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
.hidden \name
.type \name,@function
\name:
	CFI_STARTPROC
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,0f
#else
@ -239,7 +240,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
#endif
j .
0: br \reg
	CFI_ENDPROC
.endm
GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
@ -426,13 +427,13 @@ ENTRY(system_call)
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
# clear user controlled register to prevent speculative use
xgr %r0,%r0
# load address of system call table
lg %r10,__THREAD_sysc_table(%r13,%r12)
llgh %r8,__PT_INT_CODE+2(%r11)
@ -1439,6 +1440,7 @@ cleanup_critical:
stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
# set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)


@ -2,8 +2,8 @@
#include <linux/module.h>
#include <asm/nospec-branch.h>
int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
static int __init nospectre_v2_setup_early(char *str)
{


@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
{ "instruction_gs", VCPU_STAT(instruction_gs) },
@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {


@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte_unmap(pte);
}
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
if (mm == &init_mm)
return;
@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
}
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
pmd_t orig = *pmdp;
*pmdp = pmd;
__set_pmd_acct(mm, addr, orig, pmd);
}
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
do {
old = *pmdp;
} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
__set_pmd_acct(vma->vm_mm, address, old, pmd);
return old;
}

View File

@ -2307,7 +2307,7 @@ choice
it can be used to assist security vulnerability exploitation.
This setting can be changed at boot time via the kernel command
line parameter vsyscall=[native|emulate|none].
line parameter vsyscall=[emulate|none].
On a system with recent enough glibc (2.14 or newer) and no
static binaries, you can say None without a performance penalty
@ -2315,15 +2315,6 @@ choice
If unsure, select "Emulate".
config LEGACY_VSYSCALL_NATIVE
bool "Native"
help
Actual executable code is located in the fixed vsyscall
address mapping, implementing time() efficiently. Since
this makes the mapping executable, it can be used during
security vulnerability exploitation (traditionally as
ROP gadgets). This configuration is not recommended.
config LEGACY_VSYSCALL_EMULATE
bool "Emulate"
help

View File

@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
default "4" if MELAN || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
config X86_PPRO_FENCE
bool "PentiumPro memory ordering errata workaround"
depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
---help---
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
occurrences of this problem, at the cost of much heavier spinlock and
memory barrier operations.
If unsure, say n here. Even distro kernels should think twice before
enabling this: there are few systems, and an unlikely bug.
config X86_F00F_BUG
def_bool y
depends on M586MMX || M586TSC || M586 || M486

View File

@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
#
# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
# the linker to force 2MB page size regardless of the default page size used
# by the linker.
#
ifdef CONFIG_X86_64
LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif
# Speed up the build
KBUILD_CFLAGS += -pipe
# Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release

View File

@ -309,6 +309,10 @@ static void parse_elf(void *output)
switch (phdr->p_type) {
case PT_LOAD:
#ifdef CONFIG_X86_64
if ((phdr->p_align % 0x200000) != 0)
error("Alignment of LOAD segment isn't multiple of 2MB");
#endif
#ifdef CONFIG_RELOCATABLE
dest = output;
dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
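The boot-time check above has a userspace analogue. A hedged sketch that walks an ELF file's program headers and flags any PT_LOAD segment whose p_align is not a 2 MiB multiple (assumes a 64-bit little-endian ELF; error handling kept minimal):

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET);
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type == PT_LOAD && (ph.p_align % 0x200000))
			printf("LOAD segment %d: p_align %#llx not 2MB aligned\n",
			       i, (unsigned long long)ph.p_align);
	}
	fclose(f);
	return 0;
}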

View File

@ -1138,7 +1138,7 @@ apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
#endif /* CONFIG_HYPERV */
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN

View File

@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
pushq 2*8(%rdi) /* regs->ip */
pushq 1*8(%rdi) /* regs->orig_ax */
movq (%rdi), %rdi /* restore %rdi */
pushq %rdi /* pt_regs->di */
pushq (%rdi) /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
ENTRY(stub32_clone)
/*
* The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
* The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
*
* The native 64-bit kernel's sys_clone() implements the latter,
* so we need to swap arguments here before calling it:
*/
xchg %r8, %rcx
jmp sys_clone
ENDPROC(stub32_clone)

View File

@ -8,12 +8,12 @@
#
0 i386 restart_syscall sys_restart_syscall
1 i386 exit sys_exit
2 i386 fork sys_fork sys_fork
2 i386 fork sys_fork
3 i386 read sys_read
4 i386 write sys_write
5 i386 open sys_open compat_sys_open
6 i386 close sys_close
7 i386 waitpid sys_waitpid sys32_waitpid
7 i386 waitpid sys_waitpid compat_sys_x86_waitpid
8 i386 creat sys_creat
9 i386 link sys_link
10 i386 unlink sys_unlink
@ -78,7 +78,7 @@
69 i386 ssetmask sys_ssetmask
70 i386 setreuid sys_setreuid16
71 i386 setregid sys_setregid16
72 i386 sigsuspend sys_sigsuspend sys_sigsuspend
72 i386 sigsuspend sys_sigsuspend
73 i386 sigpending sys_sigpending compat_sys_sigpending
74 i386 sethostname sys_sethostname
75 i386 setrlimit sys_setrlimit compat_sys_setrlimit
@ -96,7 +96,7 @@
87 i386 swapon sys_swapon
88 i386 reboot sys_reboot
89 i386 readdir sys_old_readdir compat_sys_old_readdir
90 i386 mmap sys_old_mmap sys32_mmap
90 i386 mmap sys_old_mmap compat_sys_x86_mmap
91 i386 munmap sys_munmap
92 i386 truncate sys_truncate compat_sys_truncate
93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
@ -126,7 +126,7 @@
117 i386 ipc sys_ipc compat_sys_ipc
118 i386 fsync sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
120 i386 clone sys_clone stub32_clone
120 i386 clone sys_clone compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname
122 i386 uname sys_newuname
123 i386 modify_ldt sys_modify_ldt
@ -186,8 +186,8 @@
177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
179 i386 rt_sigsuspend sys_rt_sigsuspend
180 i386 pread64 sys_pread64 sys32_pread
181 i386 pwrite64 sys_pwrite64 sys32_pwrite
180 i386 pread64 sys_pread64 compat_sys_x86_pread
181 i386 pwrite64 sys_pwrite64 compat_sys_x86_pwrite
182 i386 chown sys_chown16
183 i386 getcwd sys_getcwd
184 i386 capget sys_capget
@ -196,14 +196,14 @@
187 i386 sendfile sys_sendfile compat_sys_sendfile
188 i386 getpmsg
189 i386 putpmsg
190 i386 vfork sys_vfork sys_vfork
190 i386 vfork sys_vfork
191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit
192 i386 mmap2 sys_mmap_pgoff
193 i386 truncate64 sys_truncate64 sys32_truncate64
194 i386 ftruncate64 sys_ftruncate64 sys32_ftruncate64
195 i386 stat64 sys_stat64 sys32_stat64
196 i386 lstat64 sys_lstat64 sys32_lstat64
197 i386 fstat64 sys_fstat64 sys32_fstat64
193 i386 truncate64 sys_truncate64 compat_sys_x86_truncate64
194 i386 ftruncate64 sys_ftruncate64 compat_sys_x86_ftruncate64
195 i386 stat64 sys_stat64 compat_sys_x86_stat64
196 i386 lstat64 sys_lstat64 compat_sys_x86_lstat64
197 i386 fstat64 sys_fstat64 compat_sys_x86_fstat64
198 i386 lchown32 sys_lchown
199 i386 getuid32 sys_getuid
200 i386 getgid32 sys_getgid
@ -231,7 +231,7 @@
# 222 is unused
# 223 is unused
224 i386 gettid sys_gettid
225 i386 readahead sys_readahead sys32_readahead
225 i386 readahead sys_readahead compat_sys_x86_readahead
226 i386 setxattr sys_setxattr
227 i386 lsetxattr sys_lsetxattr
228 i386 fsetxattr sys_fsetxattr
@ -256,7 +256,7 @@
247 i386 io_getevents sys_io_getevents compat_sys_io_getevents
248 i386 io_submit sys_io_submit compat_sys_io_submit
249 i386 io_cancel sys_io_cancel
250 i386 fadvise64 sys_fadvise64 sys32_fadvise64
250 i386 fadvise64 sys_fadvise64 compat_sys_x86_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
252 i386 exit_group sys_exit_group
253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
@ -278,7 +278,7 @@
269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
270 i386 tgkill sys_tgkill
271 i386 utimes sys_utimes compat_sys_utimes
272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64
272 i386 fadvise64_64 sys_fadvise64_64 compat_sys_x86_fadvise64_64
273 i386 vserver
274 i386 mbind sys_mbind
275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
@ -306,7 +306,7 @@
297 i386 mknodat sys_mknodat
298 i386 fchownat sys_fchownat
299 i386 futimesat sys_futimesat compat_sys_futimesat
300 i386 fstatat64 sys_fstatat64 sys32_fstatat
300 i386 fstatat64 sys_fstatat64 compat_sys_x86_fstatat
301 i386 unlinkat sys_unlinkat
302 i386 renameat sys_renameat
303 i386 linkat sys_linkat
@ -320,7 +320,7 @@
311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list
312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list
313 i386 splice sys_splice
314 i386 sync_file_range sys_sync_file_range sys32_sync_file_range
314 i386 sync_file_range sys_sync_file_range compat_sys_x86_sync_file_range
315 i386 tee sys_tee
316 i386 vmsplice sys_vmsplice compat_sys_vmsplice
317 i386 move_pages sys_move_pages compat_sys_move_pages
@ -330,7 +330,7 @@
321 i386 signalfd sys_signalfd compat_sys_signalfd
322 i386 timerfd_create sys_timerfd_create
323 i386 eventfd sys_eventfd
324 i386 fallocate sys_fallocate sys32_fallocate
324 i386 fallocate sys_fallocate compat_sys_x86_fallocate
325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4

View File

@ -5,8 +5,6 @@
#undef CONFIG_OPTIMIZE_INLINING
#endif
#undef CONFIG_X86_PPRO_FENCE
#ifdef CONFIG_X86_64
/*

View File

@ -42,10 +42,8 @@
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
static enum { EMULATE, NONE } vsyscall_mode =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
NONE;
#else
EMULATE;
@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
if (str) {
if (!strcmp("emulate", str))
vsyscall_mode = EMULATE;
else if (!strcmp("native", str))
vsyscall_mode = NATIVE;
else if (!strcmp("none", str))
vsyscall_mode = NONE;
else
@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
WARN_ON_ONCE(address != regs->ip);
/* This should be unreachable in NATIVE mode. */
if (WARN_ON(vsyscall_mode == NATIVE))
return false;
if (vsyscall_mode == NONE) {
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall attempted with vsyscall=none");
@ -355,7 +347,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
p4d->p4d |= _PAGE_USER;
set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
pud = pud_offset(p4d, VSYSCALL_ADDR);
set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
@ -370,9 +362,7 @@ void __init map_vsyscall(void)
if (vsyscall_mode != NONE) {
__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
vsyscall_mode == NATIVE
? PAGE_KERNEL_VSYSCALL
: PAGE_KERNEL_VVAR);
PAGE_KERNEL_VVAR);
set_vsyscall_pgtable_user_bits(swapper_pg_dir);
}

View File

@ -2118,7 +2118,8 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}
if (READ_ONCE(x86_pmu.attr_rdpmc))
if (READ_ONCE(x86_pmu.attr_rdpmc) &&
!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
return err;

View File

@ -2952,9 +2952,9 @@ static void intel_pebs_aliases_skl(struct perf_event *event)
return intel_pebs_aliases_precdist(event);
}
static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
{
unsigned long flags = x86_pmu.free_running_flags;
unsigned long flags = x86_pmu.large_pebs_flags;
if (event->attr.use_clockid)
flags &= ~PERF_SAMPLE_TIME;
@ -2976,8 +2976,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (!event->attr.freq) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_free_running_flags(event)))
event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
~intel_pmu_large_pebs_flags(event)))
event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
}
if (x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
X86_CONFIG(.event=0xc0, .umask=0x01)) {
if (left < 128)
left = 128;
left &= ~0x3fu;
left &= ~0x3fULL;
}
return left;
}
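The one-character change above matters because ~0x3fu is a 32-bit value: when it is widened to 64 bits for the &, its upper half is zero and silently clears bits 32-63 of left. A quick standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t left = 0x1ffffffffULL;

	/* ~0x3fu == 0xffffffc0; zero-extended, it wipes the high 32 bits */
	printf("%#llx\n", (unsigned long long)(left & ~0x3fu));   /* 0xffffffc0 */
	printf("%#llx\n", (unsigned long long)(left & ~0x3fULL)); /* 0x1ffffffc0 */
	return 0;
}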
@ -3460,7 +3460,7 @@ static __initconst const struct x86_pmu core_pmu = {
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
.free_running_flags = PEBS_FREERUNNING_FLAGS,
.large_pebs_flags = LARGE_PEBS_FLAGS,
/*
* Intel PMCs cannot be accessed sanely above 32-bit width,
@ -3502,7 +3502,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
.free_running_flags = PEBS_FREERUNNING_FLAGS,
.large_pebs_flags = LARGE_PEBS_FLAGS,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of

View File

@ -935,7 +935,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
bool needed_cb = pebs_needs_sched_cb(cpuc);
cpuc->n_pebs++;
if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;
pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
@ -975,7 +975,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
bool needed_cb = pebs_needs_sched_cb(cpuc);
cpuc->n_pebs--;
if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;
pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
@ -1530,7 +1530,7 @@ void __init intel_ds_init(void)
x86_pmu.pebs_record_size =
sizeof(struct pebs_record_skl);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
break;
default:

View File

@ -3343,6 +3343,7 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
EVENT_EXTRA_END
};
@ -3562,24 +3563,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
NULL,
};
/*
* To determine the number of CHAs, read bits 27:0 of the CAPID6
* register, which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
*/
#define SKX_CAPID6 0x9c
#define SKX_CHA_BIT_MASK GENMASK(27, 0)
static int skx_count_chabox(void)
{
struct pci_dev *chabox_dev = NULL;
int bus, count = 0;
struct pci_dev *dev = NULL;
u32 val = 0;
while (1) {
chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
if (!chabox_dev)
break;
if (count == 0)
bus = chabox_dev->bus->number;
if (bus != chabox_dev->bus->number)
break;
count++;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
if (!dev)
goto out;
pci_dev_put(chabox_dev);
return count;
pci_read_config_dword(dev, SKX_CAPID6, &val);
val &= SKX_CHA_BIT_MASK;
out:
pci_dev_put(dev);
return hweight32(val);
}
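For reference, the counting step reduces to a population count over the low 28 bits; a hedged standalone equivalent (GENMASK(27, 0) is 0x0fffffff):

#include <stdio.h>

/* userspace stand-in for hweight32(val & GENMASK(27, 0)) */
static unsigned int cha_count(unsigned int capid6)
{
	return __builtin_popcount(capid6 & 0x0fffffff);
}

int main(void)
{
	printf("%u\n", cha_count(0x0fffffff));	/* 28: fully populated */
	printf("%u\n", cha_count(0x00ffffff));	/* 24 */
	return 0;
}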
void skx_uncore_cpu_init(void)
@ -3606,7 +3610,7 @@ static struct intel_uncore_type skx_uncore_imc = {
};
static struct attribute *skx_upi_uncore_formats_attr[] = {
&format_attr_event_ext.attr,
&format_attr_event.attr,
&format_attr_umask_ext.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,

View File

@ -69,7 +69,7 @@ struct event_constraint {
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING 0x0800 /* use freerunning PEBS */
#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
struct amd_nb {
@ -88,7 +88,7 @@ struct amd_nb {
* REGS_USER can be handled for events limited to ring 3.
*
*/
#define PEBS_FREERUNNING_FLAGS \
#define LARGE_PEBS_FLAGS \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
@ -608,7 +608,7 @@ struct x86_pmu {
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
int max_pebs_events;
unsigned long free_running_flags;
unsigned long large_pebs_flags;
/*
* Intel LBR

View File

@ -51,15 +51,14 @@
#define AA(__x) ((unsigned long)(__x))
asmlinkage long sys32_truncate64(const char __user *filename,
unsigned long offset_low,
unsigned long offset_high)
COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
unsigned long, offset_low, unsigned long, offset_high)
{
return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
}
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
unsigned long offset_high)
COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
unsigned long, offset_low, unsigned long, offset_high)
{
return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}
@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
return 0;
}
asmlinkage long sys32_stat64(const char __user *filename,
struct stat64 __user *statbuf)
COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_stat(filename, &stat);
@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
return ret;
}
asmlinkage long sys32_lstat64(const char __user *filename,
struct stat64 __user *statbuf)
COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_lstat(filename, &stat);
@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
return ret;
}
asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_fstat(fd, &stat);
@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
return ret;
}
asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
struct stat64 __user *statbuf, int flag)
COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
const char __user *, filename,
struct stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
unsigned int offset;
};
asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
{
struct mmap_arg_struct32 a;
@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
a.offset>>PAGE_SHIFT);
}
asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
int options)
COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
stat_addr, int, options)
{
return compat_sys_wait4(pid, stat_addr, options, NULL);
}
/* warning: next two assume little endian */
asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
u32 poslo, u32 poshi)
COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return sys_pread64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
u32 count, u32 poslo, u32 poshi)
COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return sys_pwrite64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
__u32, offset_high, __u32, len_low, __u32, len_high,
int, advice)
{
return sys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
advice);
}
asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
size_t count)
COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
unsigned int, off_hi, size_t, count)
{
return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}
asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
unsigned n_low, unsigned n_hi, int flags)
COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
unsigned int, off_hi, unsigned int, n_low,
unsigned int, n_hi, int, flags)
{
return sys_sync_file_range(fd,
((u64)off_hi << 32) | off_low,
((u64)n_hi << 32) | n_low, flags);
}
asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
size_t len, int advice)
COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
unsigned int, offset_hi, size_t, len, int, advice)
{
return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
len, advice);
}
asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
unsigned offset_hi, unsigned len_lo,
unsigned len_hi)
COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
unsigned int, offset_lo, unsigned int, offset_hi,
unsigned int, len_lo, unsigned int, len_hi)
{
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
/*
* The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
*/
COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
unsigned long, newsp, int __user *, parent_tidptr,
unsigned long, tls_val, int __user *, child_tidptr)
{
return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
tls_val);
}
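All of these wrappers share one idiom: a 64-bit offset arrives from 32-bit userspace as two registers and is reassembled as ((u64)hi << 32) | lo. A tiny standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0x89abcdef, hi = 0x01234567;
	int64_t off = ((int64_t)hi << 32) | lo;

	printf("%#llx\n", (long long)off);	/* 0x123456789abcdef */
	return 0;
}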

View File

@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
"lfence", X86_FEATURE_LFENCE_RDTSC)
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
#ifdef CONFIG_X86_32
@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For this option x86 doesn't have a strong TSO memory
* model and we should fall back to full barriers.
*/
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
WRITE_ONCE(*p, v); \
} while (0)
#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
___p1; \
})
#else /* regular x86 TSO memory ordering */
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
@ -107,8 +79,6 @@ do { \
___p1; \
})
#endif
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
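With the PPro fallback gone, x86's TSO ordering lets store-release and load-acquire compile to plain accesses plus a compiler barrier. A hedged userspace sketch of the pairing these macros provide, with C11 atomics standing in for smp_store_release()/smp_load_acquire():

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;	/* plain store, published by the release below */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static int consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		return payload;	/* ordered after the flag read */
	return -1;		/* flag not set yet */
}

int main(void)
{
	producer();
	printf("%d\n", consumer());	/* 42 */
	return 0;
}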

View File

@ -316,6 +316,7 @@
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
@ -328,6 +329,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */

View File

@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
/*
* Cache management
*
* This is needed for two cases
* 1. Out of order aware processors
* 2. Accidentally out of order processors (PPro errata #51)
*/
static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
#endif /* __KERNEL__ */
extern void native_io_delay(void);

View File

@ -39,6 +39,7 @@ struct device;
enum ucode_state {
UCODE_OK = 0,
UCODE_NEW,
UCODE_UPDATED,
UCODE_NFOUND,
UCODE_ERROR,

View File

@ -183,7 +183,10 @@
* otherwise we'll run out of registers. We don't care about CET
* here, anyway.
*/
# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
# define CALL_NOSPEC \
ALTERNATIVE( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
" jmp 904f;\n" \
" .align 16\n" \
"901: call 903f;\n" \

View File

@ -174,7 +174,6 @@ enum page_cache_mode {
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@ -206,7 +205,6 @@ enum page_cache_mode {
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)

View File

@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
extern char __entry_trampoline_start[], __entry_trampoline_end[];
#endif
#endif /* _ASM_X86_SECTIONS_H */

View File

@ -20,31 +20,43 @@
#include <asm/ia32.h>
/* ia32/sys_ia32.c */
asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
unsigned long);
asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
unsigned long);
asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
asmlinkage long sys32_fstatat(unsigned int, const char __user *,
asmlinkage long compat_sys_x86_stat64(const char __user *,
struct stat64 __user *);
asmlinkage long compat_sys_x86_lstat64(const char __user *,
struct stat64 __user *);
asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
struct stat64 __user *, int);
struct mmap_arg_struct32;
asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
int);
asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
u32);
asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
u32, u32);
long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
long sys32_vm86_warning(void);
asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
int);
asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
unsigned, unsigned, int);
asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
asmlinkage long sys32_fallocate(int, int, unsigned,
unsigned, unsigned, unsigned);
asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
size_t);
asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
unsigned int, unsigned int,
int);
asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
size_t, int);
asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
unsigned long, int __user *);
/* ia32/ia32_signal.c */
asmlinkage long sys32_sigreturn(void);

View File

@ -352,6 +352,7 @@ enum vmcs_field {
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
/* GUEST_INTERRUPTIBILITY_INFO flags. */

View File

@ -30,6 +30,7 @@ struct mce {
__u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
__u64 ppin; /* Protected Processor Inventory Number */
__u32 microcode;/* Microcode revision */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)

View File

@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
/*
* Early microcode releases for the Spectre v2 mitigation were broken.
* Information taken from;
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
* - https://kb.vmware.com/s/article/52345
* - Microcode revisions observed in the wild
* - Release note from 20180108 microcode release
@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
int i;
/*
* We know that the hypervisor lies to us about the microcode version so
* we may as well hope that it is running the correct version.
*/
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)

View File

@ -56,6 +56,9 @@
static DEFINE_MUTEX(mce_log_mutex);
/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
rdmsrl(MSR_PPIN, m->ppin);
m->microcode = boot_cpu_data.microcode;
}
DEFINE_PER_CPU(struct mce, injectm);
@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
*/
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
m->microcode);
}
static void print_mce(struct mce *m)
@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
mutex_unlock(&mce_sysfs_mutex);
return size;
}
@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
mutex_unlock(&mce_sysfs_mutex);
return size;
}
@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t ret = device_store_int(s, attr, buf, size);
unsigned long old_check_interval = check_interval;
ssize_t ret = device_store_ulong(s, attr, buf, size);
if (check_interval == old_check_interval)
return ret;
if (check_interval < 1)
check_interval = 1;
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);
return ret;
}

View File

@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return -EINVAL;
ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret != UCODE_OK)
if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
struct ucode_patch *p;
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
ret = __load_microcode_amd(family, data, size);
if (ret != UCODE_OK)
if (ret != UCODE_OK) {
cleanup();
#ifdef CONFIG_X86_32
/* save BSP's matching patch for early load */
if (save) {
struct ucode_patch *p = find_patch(0);
if (p) {
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
PATCH_MAX_SIZE));
}
return ret;
}
#endif
p = find_patch(0);
if (!p) {
return ret;
} else {
if (boot_cpu_data.microcode == p->patch_id)
return ret;
ret = UCODE_NEW;
}
/* save BSP's matching patch for early load */
if (!save)
return ret;
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
return ret;
}

View File

@ -22,13 +22,16 @@
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>
@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
*/
static DEFINE_MUTEX(microcode_mutex);
/*
* Serialize late loading so that CPUs get updated one-by-one.
*/
static DEFINE_SPINLOCK(update_lock);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
return ret;
}
struct apply_microcode_ctx {
enum ucode_state err;
};
static void apply_microcode_local(void *arg)
{
struct apply_microcode_ctx *ctx = arg;
enum ucode_state *err = arg;
ctx->err = microcode_ops->apply_microcode(smp_processor_id());
*err = microcode_ops->apply_microcode(smp_processor_id());
}
static int apply_microcode_on_target(int cpu)
{
struct apply_microcode_ctx ctx = { .err = 0 };
enum ucode_state err;
int ret;
ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
if (!ret)
ret = ctx.err;
ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
if (!ret) {
if (err == UCODE_ERROR)
ret = 1;
}
return ret;
}
@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
static enum ucode_state reload_for_cpu(int cpu)
/*
* Late loading dance. Why the heavy-handed stomp_machine effort?
*
* - HT siblings must be idle and not execute other code while the other sibling
* is loading microcode in order to avoid any negative interactions caused by
* the loading.
*
* - In addition, microcode update on the cores must be serialized until this
* requirement can be relaxed in the future. Right now, this is conservative
* and good.
*/
#define SPINUNIT 100 /* 100 nsec */
static int check_online_cpus(void)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
enum ucode_state ustate;
if (num_online_cpus() == num_present_cpus())
return 0;
if (!uci->valid)
return UCODE_OK;
pr_err("Not all CPUs online, aborting microcode update.\n");
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
if (ustate != UCODE_OK)
return ustate;
return -EINVAL;
}
return apply_microcode_on_target(cpu);
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;
static int __wait_for_cpus(atomic_t *t, long long timeout)
{
int all_cpus = num_online_cpus();
atomic_inc(t);
while (atomic_read(t) < all_cpus) {
if (timeout < SPINUNIT) {
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
all_cpus - atomic_read(t));
return 1;
}
ndelay(SPINUNIT);
timeout -= SPINUNIT;
touch_nmi_watchdog();
}
return 0;
}
/*
* Returns:
* < 0 - on error
* 0 - no update done
* 1 - microcode was updated
*/
static int __reload_late(void *info)
{
int cpu = smp_processor_id();
enum ucode_state err;
int ret = 0;
/*
* Wait for all CPUs to arrive. A load will not be attempted unless all
* CPUs show up.
*/
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;
spin_lock(&update_lock);
apply_microcode_local(&err);
spin_unlock(&update_lock);
if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
return -1;
/* siblings return UCODE_OK because their engine got updated already */
} else if (err == UCODE_UPDATED || err == UCODE_OK) {
ret = 1;
} else {
return ret;
}
/*
* Increase the wait timeout to a safe value here since we're
* serializing the microcode update and that could take a while on a
* large number of CPUs. And that is fine as the *actual* timeout will
* be determined by the last CPU finished updating and thus cut short.
*/
if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
panic("Timeout during microcode update!\n");
return ret;
}
/*
* Reload microcode late on all CPUs. Wait for a sec until they
* all gather together.
*/
static int microcode_reload_late(void)
{
int ret;
atomic_set(&late_cpus_in, 0);
atomic_set(&late_cpus_out, 0);
ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
if (ret > 0)
microcode_check();
return ret;
}
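The two-counter rendezvous above (everyone checks in before any update starts, everyone checks out before any CPU leaves) can be sketched in userspace with C11 threads. A hedged illustration of the pattern, not the kernel mechanism itself (needs glibc 2.28 or newer for <threads.h>):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define NCPUS 4

static atomic_int late_in, late_out;

static void wait_for_all(atomic_int *t)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NCPUS)
		thrd_yield();		/* the kernel spins with a timeout */
}

static int reload(void *arg)
{
	wait_for_all(&late_in);		/* no update until everyone arrives */
	printf("cpu %ld: applying update\n", (long)arg);
	wait_for_all(&late_out);	/* no exit until everyone is done */
	return 0;
}

int main(void)
{
	thrd_t t[NCPUS];
	long i;

	for (i = 0; i < NCPUS; i++)
		thrd_create(&t[i], reload, (void *)i);
	for (i = 0; i < NCPUS; i++)
		thrd_join(t[i], NULL);
	return 0;
}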
static ssize_t reload_store(struct device *dev,
@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
enum ucode_state tmp_ret = UCODE_OK;
bool do_callback = false;
int bsp = boot_cpu_data.cpu_index;
unsigned long val;
ssize_t ret = 0;
int cpu;
ret = kstrtoul(buf, 0, &val);
if (ret)
@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
if (tmp_ret != UCODE_NEW)
return size;
get_online_cpus();
ret = check_online_cpus();
if (ret)
goto put;
mutex_lock(&microcode_mutex);
for_each_online_cpu(cpu) {
tmp_ret = reload_for_cpu(cpu);
if (tmp_ret > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
/* set retval for the first encountered reload error */
if (!ret)
ret = -EINVAL;
}
if (tmp_ret == UCODE_UPDATED)
do_callback = true;
}
if (!ret && do_callback)
microcode_check();
ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
put:
put_online_cpus();
if (!ret)
if (ret >= 0)
ret = size;
return ret;
@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
if (system_state != SYSTEM_RUNNING)
return UCODE_NFOUND;
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
refresh_fw);
if (ustate == UCODE_OK) {
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
if (ustate == UCODE_NEW) {
pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}

View File

@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
if (!mc)
return 0;
/*
* Save us the MSR write below - which is a particularly expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
uci->cpu_sig.rev = rev;
return UCODE_OK;
}
/*
* Writeback and invalidate caches before updating microcode to avoid
* internal issues depending on what the microcode is updating.
*/
native_wbinvd();
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
static enum ucode_state apply_microcode_intel(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
struct cpuinfo_x86 *c;
static int prev_rev;
u32 rev;
@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
if (WARN_ON(raw_smp_processor_id() != cpu))
return UCODE_ERROR;
uci = ucode_cpu_info + cpu;
mc = uci->mc;
/* Look for a newer patch in our cache: */
mc = find_patch(uci);
if (!mc) {
/* Look for a newer patch in our cache: */
mc = find_patch(uci);
mc = uci->mc;
if (!mc)
return UCODE_NFOUND;
}
/*
* Save us the MSR write below - which is a particularly expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
uci->cpu_sig.rev = rev;
c->microcode = rev;
return UCODE_OK;
}
/*
* Writeback and invalidate caches before updating microcode to avoid
* internal issues depending on what the microcode is updating.
*/
native_wbinvd();
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}
c = &cpu_data(cpu);
uci->cpu_sig.rev = rev;
c->microcode = rev;
@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
enum ucode_state ret = UCODE_OK;
while (leftover) {
struct microcode_header_intel mc_header;
@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
new_mc = mc;
new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
ret = UCODE_NEW;
}
ucode_ptr += mc_size;
@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
return UCODE_OK;
return ret;
}
static int get_ucode_fw(void *to, const void *from, size_t n)

View File

@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
*/
static const __initconst struct idt_data dbg_idts[] = {
INTG(X86_TRAP_DB, debug),
INTG(X86_TRAP_BP, int3),
};
#endif
@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
static const __initconst struct idt_data ist_idts[] = {
ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
#ifdef CONFIG_X86_MCE
ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),

View File

@ -23,7 +23,7 @@
/*
* this changes the io permissions bitmap in the current task.
*/
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
struct thread_struct *t = &current->thread;
struct tss_struct *tss;

View File

@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
#ifdef CONFIG_X86_64
is_in_entry_trampoline_section =
(addr >= (unsigned long)__entry_trampoline_start &&
addr < (unsigned long)__entry_trampoline_end);
#endif
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
addr < (unsigned long)__entry_text_end);
addr < (unsigned long)__entry_text_end) ||
is_in_entry_trampoline_section;
}
int __init arch_init_kprobes(void)

View File

@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
}
@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return 0;
s->dma_length = s->length;
}
flush_write_buffers();
return nents;
}
static void nommu_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
flush_write_buffers();
}
static void nommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
flush_write_buffers();
}
static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == NOMMU_MAPPING_ERROR;
@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
.mapping_error = nommu_mapping_error,
.dma_supported = x86_dma_supported,

View File

@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
#define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code) != 8);
/*
* Ensure that the size of each si_field never changes.
* If it does, it is a sign that the
@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
CHECK_CSI_SIZE (_kill, 2*sizeof(int));
CHECK_SI_SIZE (_kill, 2*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
CHECK_CSI_OFFSET(_timer);
CHECK_CSI_SIZE (_timer, 3*sizeof(int));
CHECK_SI_SIZE (_timer, 6*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
CHECK_CSI_OFFSET(_rt);
CHECK_CSI_SIZE (_rt, 3*sizeof(int));
CHECK_SI_SIZE (_rt, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
CHECK_CSI_OFFSET(_sigchld);
CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20);
BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C);
#ifdef CONFIG_X86_X32_ABI
CHECK_CSI_OFFSET(_sigchld_x32);
CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
/* no _sigchld_x32 in the generic siginfo_t */
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20);
#endif
CHECK_CSI_OFFSET(_sigfault);
CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
CHECK_SI_SIZE (_sigfault, 8*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10);
CHECK_CSI_OFFSET(_sigsys);
CHECK_CSI_SIZE (_sigsys, 3*sizeof(int));
CHECK_SI_SIZE (_sigsys, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14);
/* any new si_fields should be added here */
}
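The pattern above is a compile-time ABI freeze: every offset the native and compat siginfo layouts expose is pinned with a build error. Outside the kernel the same idea is a one-liner with C11 _Static_assert; a hedged miniature with a made-up struct:

#include <stddef.h>

struct siginfo_like {
	int si_signo;
	int si_errno;
	int si_code;
};

/* fails to compile the moment a field moves */
_Static_assert(offsetof(struct siginfo_like, si_code) == 8,
	       "si_code moved: userspace ABI would break");

int main(void) { return 0; }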

View File

@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);
/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
if (poke_int3_handler(regs))
return;
/*
* Use ist_enter despite the fact that we don't use an IST stack.
* We can be called from a kprobe in non-CONTEXT_KERNEL kernel
* mode or even during context tracking state changes.
*
* This means that we can't schedule. That's okay.
*/
ist_enter(regs);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
SIGTRAP) == NOTIFY_STOP)
goto exit;
/*
* Let others (NMI) know that the debug stack is in use
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
cond_local_irq_disable(regs);
debug_stack_usage_dec();
exit:
ist_exit(regs);
}

View File

@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;
check_vip:
if (VEFLAGS & X86_EFLAGS_VIP) {
if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
(X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}
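The fix requires both flags: VIP says a virtual interrupt is pending, VIF says the vm86 task currently has interrupts enabled, and only the combination should force a return to user mode. A quick truth-table check (flag values as in the x86 EFLAGS definitions):

#include <stdio.h>

#define X86_EFLAGS_VIF 0x00080000UL	/* virtual interrupts enabled */
#define X86_EFLAGS_VIP 0x00100000UL	/* virtual interrupt pending */

int main(void)
{
	unsigned long veflags = X86_EFLAGS_VIP;	/* pending but masked */

	printf("old test fires: %d\n", !!(veflags & X86_EFLAGS_VIP));	/* 1 */
	printf("new test fires: %d\n",
	       (veflags & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	       (X86_EFLAGS_VIP | X86_EFLAGS_VIF));			/* 0 */
	return 0;
}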

View File

@ -118,9 +118,11 @@ SECTIONS
#ifdef CONFIG_X86_64
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(__entry_trampoline_start) = .;
_entry_trampoline = .;
*(.entry_trampoline)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(__entry_trampoline_end) = .;
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
#endif

View File

@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
else
pte_access &= ~ACC_WRITE_MASK;
if (!kvm_is_mmio_pfn(pfn))
spte |= shadow_me_mask;
spte |= (u64)pfn << PAGE_SHIFT;
spte |= shadow_me_mask;
if (pte_access & ACC_WRITE_MASK) {

View File

@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
}
static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
if (!(dr6 & ~DR6_RESERVED)) /* icebp */
if (is_icebp(intr_info))
skip_emulated_instruction(vcpu);
kvm_queue_exception(vcpu, DB_VECTOR);

View File

@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;
if (pmd_huge(*pmd_k))
if (pmd_large(*pmd_k))
return 0;
pte_k = pte_offset_kernel(pmd_k, address);
@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
if (pud_huge(*pud))
if (pud_large(*pud))
return 0;
pmd = pmd_offset(pud, address);
@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
if (pmd_huge(*pmd))
if (pmd_large(*pmd))
return 0;
pte_ref = pte_offset_kernel(pmd_ref, address);

View File

@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
#define PAGE_INUSE 0xFD
static void __meminit free_pagetable(struct page *page, int order,
struct vmem_altmap *altmap)
static void __meminit free_pagetable(struct page *page, int order)
{
unsigned long magic;
unsigned int nr_pages = 1 << order;
if (altmap) {
vmem_altmap_free(altmap, nr_pages);
return;
}
/* bootmem page has reserved flag */
if (PageReserved(page)) {
__ClearPageReserved(page);
@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
free_pages((unsigned long)page_address(page), order);
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
static void __meminit free_hugepage_table(struct page *page,
struct vmem_altmap *altmap)
{
if (altmap)
vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
else
free_pagetable(page, get_order(PMD_SIZE));
}
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
pte_t *pte;
int i;
@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
}
/* free a pte table */
free_pagetable(pmd_page(*pmd), 0, altmap);
free_pagetable(pmd_page(*pmd), 0);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
}
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
struct vmem_altmap *altmap)
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
pmd_t *pmd;
int i;
@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
}
/* free a pmd table */
free_pagetable(pud_page(*pud), 0, altmap);
free_pagetable(pud_page(*pud), 0);
spin_lock(&init_mm.page_table_lock);
pud_clear(pud);
spin_unlock(&init_mm.page_table_lock);
}
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
struct vmem_altmap *altmap)
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
pud_t *pud;
int i;
@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
}
/* free a pud table */
free_pagetable(p4d_page(*p4d), 0, altmap);
free_pagetable(p4d_page(*p4d), 0);
spin_lock(&init_mm.page_table_lock);
p4d_clear(p4d);
spin_unlock(&init_mm.page_table_lock);
@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
struct vmem_altmap *altmap, bool direct)
bool direct)
{
unsigned long next, pages = 0;
pte_t *pte;
@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
* freed when offlining, or simply not in use.
*/
if (!direct)
free_pagetable(pte_page(*pte), 0, altmap);
free_pagetable(pte_page(*pte), 0);
spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);
@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
page_addr = page_address(pte_page(*pte));
if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
free_pagetable(pte_page(*pte), 0, altmap);
free_pagetable(pte_page(*pte), 0);
spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);
@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
free_pagetable(pmd_page(*pmd),
get_order(PMD_SIZE),
altmap);
free_hugepage_table(pmd_page(*pmd),
altmap);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
page_addr = page_address(pmd_page(*pmd));
if (!memchr_inv(page_addr, PAGE_INUSE,
PMD_SIZE)) {
free_pagetable(pmd_page(*pmd),
get_order(PMD_SIZE),
altmap);
free_hugepage_table(pmd_page(*pmd),
altmap);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
}
pte_base = (pte_t *)pmd_page_vaddr(*pmd);
remove_pte_table(pte_base, addr, next, altmap, direct);
free_pte_table(pte_base, pmd, altmap);
remove_pte_table(pte_base, addr, next, direct);
free_pte_table(pte_base, pmd);
}
/* Call free_pmd_table() in remove_pud_table(). */
@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
IS_ALIGNED(next, PUD_SIZE)) {
if (!direct)
free_pagetable(pud_page(*pud),
get_order(PUD_SIZE),
altmap);
get_order(PUD_SIZE));
spin_lock(&init_mm.page_table_lock);
pud_clear(pud);
@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
if (!memchr_inv(page_addr, PAGE_INUSE,
PUD_SIZE)) {
free_pagetable(pud_page(*pud),
get_order(PUD_SIZE),
altmap);
get_order(PUD_SIZE));
spin_lock(&init_mm.page_table_lock);
pud_clear(pud);
@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
pmd_base = pmd_offset(pud, 0);
remove_pmd_table(pmd_base, addr, next, direct, altmap);
free_pmd_table(pmd_base, pud, altmap);
free_pmd_table(pmd_base, pud);
}
if (direct)
@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
* to adapt for boot-time switching between 4 and 5 level page tables.
*/
if (CONFIG_PGTABLE_LEVELS == 5)
free_pud_table(pud_base, p4d, altmap);
free_pud_table(pud_base, p4d);
}
if (direct)
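Taken together, these hunks split one overloaded helper in two: page-table pages are always ordinary memory and can be freed directly, while PMD-sized data pages may live in a device-supplied altmap reservation, which is now handled only by the new free_hugepage_table(). A minimal standalone model of that dispatch, assuming 4 KiB base pages; struct vmem_altmap, vmem_altmap_free() and free_pagetable() below are simplified stand-ins, not the kernel's definitions:

/*
 * Standalone model of the free_hugepage_table() dispatch added above.
 * Assumes 4 KiB base pages; all types and helpers are stand-ins.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)	/* one 2 MiB huge page */

struct vmem_altmap { unsigned long freed; };	/* stub */

static void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pages)
{
	/* device-backed case: pages return to the altmap reservation */
	altmap->freed += nr_pages;
	printf("altmap: reclaimed %lu pages\n", nr_pages);
}

static void free_pagetable(void *page, int order)
{
	/* ordinary case: the whole block returns to the page allocator */
	printf("buddy: freed order-%d block at %p\n", order, page);
}

static void free_hugepage_table(void *page, struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, 9);	/* get_order(PMD_SIZE) == 9 */
}

int main(void)
{
	struct vmem_altmap map = { 0 };
	char page[64];				/* placeholder "page" */

	free_hugepage_table(page, &map);	/* ZONE_DEVICE-style vmemmap */
	free_hugepage_table(page, NULL);	/* ordinary vmemmap */
	return 0;
}

The same decision in the kernel is what lets every other free_pagetable() caller in the hunks above drop the altmap argument entirely.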

View File

@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)
 	return 0;
 }

+/**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
+ *
+ * Context: The pud range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pud_free_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	if (pud_none(*pud))
+		return 1;
+
+	pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_free_pte_page(&pmd[i]))
+			return 0;
+
+	pud_clear(pud);
+	free_page((unsigned long)pmd);
+
+	return 1;
+}
+
+/**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
+ *
+ * Context: The pmd range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd))
+		return 1;
+
+	pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pmd_clear(pmd);
+	free_page((unsigned long)pte);
+
+	return 1;
+}
+
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
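The ordering these two helpers rely on is strictly bottom-up: every pte page hanging off a pmd page is cleared and freed before the pmd page itself, and a failure return leaves the upper entry intact so the caller can fall back to smaller mappings. A compilable userspace sketch of that contract; pud_t, pmd_t, PTRS_PER_PMD and both function bodies are toy stand-ins, not the kernel implementation:

/*
 * Userspace sketch of the teardown order pud_free_pmd_page() enforces:
 * all PTRS_PER_PMD pte pages must be freed before the pmd page itself.
 */
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PMD 512

typedef struct { void *pte_page; } pmd_t;	/* toy stand-in */
typedef struct { pmd_t *pmd_page; } pud_t;	/* toy stand-in */

static int pmd_free_pte_page(pmd_t *pmd)
{
	if (!pmd->pte_page)		/* models pmd_none(): nothing mapped */
		return 1;
	pmd->pte_page = NULL;		/* clear the entry first... */
	free(pmd->pte_page);		/* (already saved as NULL; freed below) */
	return 1;
}

static int pud_free_pmd_page(pud_t *pud)
{
	int i;

	if (!pud->pmd_page)		/* models pud_none(): nothing mapped */
		return 1;
	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pud->pmd_page[i]))
			return 0;	/* leave the pud intact on failure */
	free(pud->pmd_page);		/* only now free the pmd page itself */
	pud->pmd_page = NULL;
	return 1;
}

int main(void)
{
	pud_t pud = { calloc(PTRS_PER_PMD, sizeof(pmd_t)) };

	pud.pmd_page[0].pte_page = malloc(4096);
	free(pud.pmd_page[0].pte_page);	/* keep the toy model leak-free */
	pud.pmd_page[0].pte_page = NULL;
	printf("freed: %d\n", pud_free_pmd_page(&pud));
	return 0;
}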

View File

@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }

 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
 static void __init pti_setup_espfix64(void)
 {

View File

@@ -1188,7 +1188,7 @@ skip_init_addrs:
 	 * may converge on the last pass. In such case do one more
 	 * pass to emit the final image
 	 */
-	for (pass = 0; pass < 10 || image; pass++) {
+	for (pass = 0; pass < 20 || image; pass++) {
 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 		if (proglen <= 0) {
 			image = NULL;
@@ -1215,6 +1215,7 @@ skip_init_addrs:
 			}
 		}
 		oldproglen = proglen;
+		cond_resched();
 	}

 	if (bpf_jit_enable > 1)
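The JIT shrinks the image a little on each pass and stops once two consecutive passes produce the same length, so this change doubles the convergence cap from 10 to 20 passes and yields the CPU between passes via cond_resched(). A toy model of the loop's shape in plain C; do_sizing_pass() and all of its numbers are invented for illustration, only the loop structure mirrors the kernel code:

/*
 * Toy model of the converge-then-emit loop patched above: rerun the
 * sizing pass until the emitted length stops shrinking, under a
 * bounded pass count.
 */
#include <stdio.h>

static int do_sizing_pass(int oldlen)
{
	/* pretend branch shortening trims ~10% per pass until a floor */
	return oldlen > 1000 ? oldlen - oldlen / 10 : oldlen;
}

int main(void)
{
	int oldproglen = 4096, proglen = 0, pass;

	for (pass = 0; pass < 20; pass++) {	/* raised cap, as in the patch */
		proglen = do_sizing_pass(oldproglen);
		if (proglen == oldproglen)
			break;			/* converged: length is stable */
		oldproglen = proglen;
		/* the kernel calls cond_resched() here so long JITs stay preemptible */
	}
	printf("converged to %d bytes after %d passes\n", proglen, pass);
	return 0;
}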

View File

@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)

 	if (!pud) {
 		if (CONFIG_PGTABLE_LEVELS > 4)
 			free_page((unsigned long) pgd_page_vaddr(*pgd));
-		free_page((unsigned long)efi_pgd);
+		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
 		return -ENOMEM;
 	}
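The error path has to undo the allocation with the same order it was made with: efi_pgd comes from __get_free_pages(..., PGD_ALLOCATION_ORDER), so freeing it as a single page leaks the tail pages whenever that order is non-zero, as it is with page-table isolation enabled. A loose userspace analogy; both helpers below are stand-ins that only mimic the shape of the __get_free_pages()/free_pages() pairing:

/*
 * Userspace stand-ins illustrating order-matched allocate/free pairs;
 * no real pages are involved.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void *get_free_pages_model(int order)
{
	return malloc(PAGE_SIZE << order);	/* 2^order contiguous "pages" */
}

static void free_pages_model(void *addr, int order)
{
	/* a correct free accounts for the entire 2^order block */
	printf("releasing %lu bytes\n", PAGE_SIZE << order);
	free(addr);
}

int main(void)
{
	int order = 1;		/* stands in for PGD_ALLOCATION_ORDER under PTI */
	void *pgd = get_free_pages_model(order);

	/*
	 * Freeing with order 0 here would account for only the head page,
	 * which is exactly the mismatch the patch above corrects.
	 */
	free_pages_model(pgd, order);
	return 0;
}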

Some files were not shown because too many files have changed in this diff.