Merge 5.15-rc3 into usb-next
We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit ae9a614988
@@ -259,7 +259,7 @@ Configuring the kernel
Compiling the kernel
--------------------

 - Make sure you have at least gcc 4.9 available.
 - Make sure you have at least gcc 5.1 available.
   For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.

Please note that you can still run a.out user programs with this kernel.
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations. In that
case the Linux IRQ numbers cannot be dynamically assigned and the legacy
mapping should be used.

As the name implies, the *_legacy() functions are deprecated and only
As the name implies, the \*_legacy() functions are deprecated and only
exist to ease the support of ancient platforms. No new users should be
added.
added. Same goes for the \*_simple() functions when their use results
in the legacy behaviour.

The legacy map assumes a contiguous range of IRQ numbers has already
been allocated for the controller and that the IRQ number can be
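(Aside, not part of the diff: the documentation hunk above contrasts the deprecated *_legacy() registration style with the preferred dynamic mapping. A minimal sketch of the two styles, assuming the irqdomain API as of v5.15 and an entirely made-up controller:)

/* Sketch only: "my_intc" and its numbers are invented for illustration. */
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_INTC_NR_IRQS   32   /* hwirqs 0..31 */
#define MY_INTC_FIRST_IRQ 16   /* pre-reserved Linux IRQ base (legacy case only) */

static const struct irq_domain_ops my_intc_ops;   /* .map()/.unmap() not shown */

/* Deprecated style: fixed hwirq -> Linux IRQ mapping for ancient platforms. */
static struct irq_domain *my_intc_add_legacy(struct device_node *np)
{
	return irq_domain_add_legacy(np, MY_INTC_NR_IRQS, MY_INTC_FIRST_IRQ,
				     0, &my_intc_ops, NULL);
}

/* Preferred style: first_irq == 0 lets the core allocate IRQ numbers. */
static struct irq_domain *my_intc_add(struct device_node *np)
{
	return irq_domain_add_simple(np, MY_INTC_NR_IRQS, 0,
				     &my_intc_ops, NULL);
}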
@@ -54,7 +54,7 @@ properties:
- const: toradex,apalis_t30
- const: nvidia,tegra30
- items:
- const: toradex,apalis_t30-eval-v1.1
- const: toradex,apalis_t30-v1.1-eval
- const: toradex,apalis_t30-eval
- const: toradex,apalis_t30-v1.1
- const: toradex,apalis_t30

@@ -9,7 +9,7 @@ function block.

All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.

DISP function blocks
====================

@@ -19,7 +19,9 @@ properties:
- const: allwinner,sun8i-v3s-emac
- const: allwinner,sun50i-a64-emac
- items:
- const: allwinner,sun50i-h6-emac
- enum:
- allwinner,sun20i-d1-emac
- allwinner,sun50i-h6-emac
- const: allwinner,sun50i-a64-emac

reg:
@@ -0,0 +1,89 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Samsung SoC series UFS host controller Device Tree Bindings

maintainers:
  - Alim Akhtar <alim.akhtar@samsung.com>

description: |
  Each Samsung UFS host controller instance should have its own node.
  This binding define Samsung specific binding other then what is used
  in the common ufshcd bindings
  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt

properties:

  compatible:
    enum:
      - samsung,exynos7-ufs

  reg:
    items:
      - description: HCI register
      - description: vendor specific register
      - description: unipro register
      - description: UFS protector register

  reg-names:
    items:
      - const: hci
      - const: vs_hci
      - const: unipro
      - const: ufsp

  clocks:
    items:
      - description: ufs link core clock
      - description: unipro main clock

  clock-names:
    items:
      - const: core_clk
      - const: sclk_unipro_main

  interrupts:
    maxItems: 1

  phys:
    maxItems: 1

  phy-names:
    const: ufs-phy

required:
  - compatible
  - reg
  - interrupts
  - phys
  - phy-names
  - clocks
  - clock-names

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/clock/exynos7-clk.h>

    ufs: ufs@15570000 {
        compatible = "samsung,exynos7-ufs";
        reg = <0x15570000 0x100>,
              <0x15570100 0x100>,
              <0x15571000 0x200>,
              <0x15572000 0x300>;
        reg-names = "hci", "vs_hci", "unipro", "ufsp";
        interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
                 <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
        clock-names = "core_clk", "sclk_unipro_main";
        pinctrl-names = "default";
        pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
        phys = <&ufs_phy>;
        phy-names = "ufs-phy";
    };
...
@@ -851,7 +851,7 @@ NOTES:
- 0x88A8 traffic will not be received unless VLAN stripping is disabled with
  the following command::

    # ethool -K <ethX> rxvlan off
    # ethtool -K <ethX> rxvlan off

- 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
  configured on the same port. 0x88a8/0x8100 traffic will not be received if

@@ -296,7 +296,7 @@ not available.
Device Tree bindings and board design
=====================================

This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
and aims to showcase some potential switch caveats.

RMII PHY role and out-of-band signaling
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
Program                Minimal version Command to check the version
====================== =============== ========================================
GNU C                  4.9             gcc --version
GNU C                  5.1             gcc --version
Clang/LLVM (optional)  10.0.1          clang --version
GNU make               3.81            make --version
binutils               2.23            ld -v

@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
编译内核
---------

 - 确保您至少有gcc 4.9可用。
 - 确保您至少有gcc 5.1可用。
   有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。

请注意,您仍然可以使用此内核运行a.out用户程序。

@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
編譯內核
---------

 - 確保您至少有gcc 4.9可用。
 - 確保您至少有gcc 5.1可用。
   有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。

請注意,您仍然可以使用此內核運行a.out用戶程序。
MAINTAINERS
@@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/amd-pmc.*

AMD POWERPLAY
AMD POWERPLAY AND SWSMU
M: Evan Quan <evan.quan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/pm/powerplay/
F: drivers/gpu/drm/amd/pm/

AMD PTDMA DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>

@@ -2804,9 +2804,8 @@ F: arch/arm/mach-pxa/include/mach/vpac270.h
F: arch/arm/mach-pxa/vpac270.c

ARM/VT8500 ARM ARCHITECTURE
M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c

@@ -13255,9 +13254,9 @@ F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*

NIOS2 ARCHITECTURE
M: Ley Foon Tan <ley.foon.tan@intel.com>
M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
F: arch/nios2/

NITRO ENCLAVES (NE)

@@ -14342,7 +14341,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
F: drivers/pci/controller/pci-ixp4xx.c

PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Jonathan Derrick <jonathan.derrick@intel.com>
M: Nirmal Patel <nirmal.patel@linux.intel.com>
R: Jonathan Derrick <jonathan.derrick@linux.dev>
L: linux-pci@vger.kernel.org
S: Supported
F: drivers/pci/controller/vmd.c

@@ -16650,13 +16650,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
S: Supported
F: drivers/char/pcmcia/scr24x_cs.c

SCSI CDROM DRIVER
M: Jens Axboe <axboe@kernel.dk>
L: linux-scsi@vger.kernel.org
S: Maintained
W: http://www.kernel.dk
F: drivers/scsi/sr*

SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org

@@ -16955,7 +16948,6 @@ F: drivers/misc/sgi-xp/

SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/

@@ -17968,10 +17960,11 @@ F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video*

SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux-foundation.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
W: http://git.infradead.org/users/hch/dma-mapping.git
T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c

@@ -19288,13 +19281,12 @@ S: Maintained
F: drivers/usb/misc/chaoskey.c

USB CYPRESS C67X00 DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
L: linux-usb@vger.kernel.org
S: Maintained
S: Orphan
F: drivers/usb/c67x00/

USB DAVICOM DM9601 DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
M: Peter Korsgaard <peter@korsgaard.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linux-usb.org/usbnet

@@ -20474,7 +20466,6 @@ F: samples/bpf/xdpsock*
F: tools/lib/bpf/xsk*

XEN BLOCK SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported

@@ -20522,7 +20513,7 @@ S: Supported
F: drivers/net/xen-netback/*

XEN PCI SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*

@@ -20545,7 +20536,8 @@ S: Supported
F: sound/xen/*

XEN SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Juergen Gross <jgross@suse.com>
M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux-foundation.org
S: Supported
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Opossums on Parade

# *DOCUMENTATION*

@@ -849,12 +849,6 @@ endif

DEBUG_CFLAGS :=

# Workaround for GCC versions < 5.0
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
ifdef CONFIG_CC_IS_GCC
DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif

ifdef CONFIG_DEBUG_INFO

ifdef CONFIG_DEBUG_INFO_SPLIT
@@ -20,7 +20,7 @@ config ALPHA
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP if PCI
select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION

@@ -199,7 +199,6 @@ config ALPHA_EIGER

config ALPHA_JENSEN
bool "Jensen"
depends on BROKEN
select HAVE_EISA
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one

@@ -16,3 +16,4 @@ extern void __divlu(void);
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
* Change virtual addresses to physical addresses and vv.
*/
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
static inline unsigned long virt_to_phys(volatile void *address)
{
return (unsigned long)address - IDENT_ADDR;
}

@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
static inline unsigned long virt_to_phys(volatile void *address)
{
unsigned long phys = (unsigned long)address;

@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
static inline unsigned long __deprecated virt_to_bus(volatile void *address)
{
unsigned long phys = virt_to_phys(address);
unsigned long bus = phys + __direct_map_base;
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
* convinced that I need one of the newer machines.
*/

static inline unsigned int jensen_local_inb(unsigned long addr)
__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
{
return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}

static inline void jensen_local_outb(u8 b, unsigned long addr)
__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
{
*(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}

static inline unsigned int jensen_bus_inb(unsigned long addr)
__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
{
long result;

@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
return __kernel_extbl(result, addr & 3);
}

static inline void jensen_bus_outb(u8 b, unsigned long addr)
__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
{
jensen_set_hae(0);
*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
arch/alpha/include/asm/setup.h (new file)
@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ALPHA_SETUP_H
#define __ALPHA_SETUP_H

#include <uapi/asm/setup.h>

/*
* We leave one page for the initial stack page, and one page for
* the initial process structure. Also, the console eats 3 MB for
* the initial bootloader (one of which we can reclaim later).
*/
#define BOOT_PCB 0x20000000
#define BOOT_ADDR 0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD KERNEL_START
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
* This is setup by the secondary bootstrap loader. Because
* the zero page is zeroed out as soon as the vm system is
* initialized, we need to copy things out into a more permanent
* place.
*/
#define PARAM ZERO_PGE
#define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000)))
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))

#endif
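(Aside, not part of the diff: COMMAND_LINE above now goes through absolute_pointer(). Sketched in plain C, under the assumption that the real helper in include/linux/compiler.h is RELOC_HIDE()-based, its purpose is to hand the compiler an address it cannot trace back to a fixed constant, which avoids bogus out-of-bounds warnings on hard-coded addresses:)

/* Illustrative stand-in only; the kernel's absolute_pointer() is the real API. */
#define demo_absolute_pointer(val)					\
({									\
	void *__p = (void *)(unsigned long)(val);			\
	asm("" : "+r"(__p));	/* hide the constant origin from the optimizer */ \
	__p;								\
})

/* Hypothetical usage sketch: */
#define DEMO_PARAM	0xfffffc0001010000UL
#define DEMO_CMDLINE	((char *)demo_absolute_pointer(DEMO_PARAM))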
@@ -1,43 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ALPHA_SETUP_H
#define __ALPHA_SETUP_H
#ifndef _UAPI__ALPHA_SETUP_H
#define _UAPI__ALPHA_SETUP_H

#define COMMAND_LINE_SIZE 256

/*
* We leave one page for the initial stack page, and one page for
* the initial process structure. Also, the console eats 3 MB for
* the initial bootloader (one of which we can reclaim later).
*/
#define BOOT_PCB 0x20000000
#define BOOT_ADDR 0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD KERNEL_START
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
* This is setup by the secondary bootstrap loader. Because
* the zero page is zeroed out as soon as the vm system is
* initialized, we need to copy things out into a more permanent
* place.
*/
#define PARAM ZERO_PGE
#define COMMAND_LINE ((char*)(PARAM + 0x0000))
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))

#endif
#endif /* _UAPI__ALPHA_SETUP_H */
@@ -7,6 +7,11 @@
*
* Code supporting the Jensen.
*/
#define __EXTERN_INLINE
#include <asm/io.h>
#include <asm/jensen.h>
#undef __EXTERN_INLINE

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

@@ -17,11 +22,6 @@

#include <asm/ptrace.h>

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/jensen.h>
#undef __EXTERN_INLINE

#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
ev67-$(CONFIG_ALPHA_EV67) := ev67-

lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
	udiv-qrnnd.o \
	udelay.o \
	$(ev6-y)memset.o \
	$(ev6-y)memcpy.o \

@@ -25,6 +25,7 @@
# along with GCC; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#include <asm/export.h>

.set noreorder
.set noat

@@ -161,3 +162,4 @@ $Odd:
ret $31,($26),1

.end __udiv_qrnnd
EXPORT_SYMBOL(__udiv_qrnnd)

@@ -7,4 +7,4 @@ ccflags-y := -w

obj-$(CONFIG_MATHEMU) += math-emu.o

math-emu-objs := math.o qrnnd.o
math-emu-objs := math.o

@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
egress:
return si_code;
}

EXPORT_SYMBOL(__udiv_qrnnd);
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
uprobe_notify_resume(regs);
} else {
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();

@@ -86,7 +86,7 @@ config ARM64
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -487,7 +487,6 @@
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_0>, <&usb0_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;

@@ -528,7 +527,6 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_1>, <&usb1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;

@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap

void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
#define acpi_os_memmap acpi_os_memmap

typedef u64 phys_cpuid_t;
#define PHYS_CPUID_INVALID INVALID_HWID
@@ -525,6 +525,11 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define EXPORT_SYMBOL_NOHWKASAN(name)
#else
#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a

@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);

static inline void mte_check_tfsr_entry(void)
{
if (!system_supports_mte())
return;

mte_check_tfsr_el1();
}

static inline void mte_check_tfsr_exit(void)
{
if (!system_supports_mte())
return;

/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);

#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);

#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
#endif

#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_DEVICE_nGnRnE);
}

static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
	acpi_size size, bool memory)
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
efi_memory_desc_t *md, *region = NULL;
pgprot_t prot;

@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
* It is fine for AML to remap regions that are not represented in the
* EFI memory map at all, as it only describes normal memory, and MMIO
* regions that require a virtual mapping to make them accessible to
* the EFI runtime services. Determine the region default
* attributes by checking the requested memory semantics.
* the EFI runtime services.
*/
prot = memory ? __pgprot(PROT_NORMAL_NC) :
	__pgprot(PROT_DEVICE_nGnRnE);
prot = __pgprot(PROT_DEVICE_nGnRnE);
if (region) {
switch (region->type) {
case EFI_LOADER_CODE:

@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
return __ioremap(phys, size, prot);
}

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
return __acpi_os_ioremap(phys, size, false);
}

void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
{
return __acpi_os_ioremap(phys, size, true);
}

/*
* Claim Synchronous External Aborts as a firmware first notification.
*
@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
* ThunderX leads to apparent I-cache corruption of kernel text, which
* ends as well as you might imagine. Don't even try.
* ends as well as you might imagine. Don't even try. We cannot rely
* on the cpus_have_*cap() helpers here to detect the CPU erratum
* because cpucap detection order may change. However, since we know
* affected CPUs are always in a homogeneous configuration, it is
* safe to rely on this_cpu_has_cap() here.
*/
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1;
}

@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
void sve_alloc(struct task_struct *task)
{
if (task->thread.sve_state) {
memset(task->thread.sve_state, 0, sve_state_size(current));
memset(task->thread.sve_state, 0, sve_state_size(task));
return;
}
@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
u64 tfsr_el1;

if (!system_supports_mte())
return;

tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*

@@ -199,6 +194,9 @@ void mte_thread_init_user(void)

void mte_thread_switch(struct task_struct *next)
{
if (!system_supports_mte())
return;

mte_update_sctlr_user(next);

/*
@@ -18,7 +18,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>

@@ -58,7 +57,7 @@

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);

if (thread_flags & _TIF_NOTIFY_RESUME) {
if (thread_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}

if (thread_flags & _TIF_FOREIGN_FPSTATE)
fpsimd_restore_current_state();
@@ -173,4 +173,4 @@ L(done):
ret

SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)
EXPORT_SYMBOL_NOHWKASAN(strcmp)

@@ -258,4 +258,4 @@ L(ret0):
ret

SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
EXPORT_SYMBOL_NOHWKASAN(strncmp)

@@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);

if (thread_info_flags & _TIF_NOTIFY_RESUME) {
if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}
@@ -17,21 +17,21 @@
* two accesses to memory, which may be undesirable for some devices.
*/
#define in_8(addr) \
({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
#define in_be16(addr) \
({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
#define in_be32(addr) \
({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
#define in_le16(addr) \
({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
#define in_le32(addr) \
({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })

#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))

#define raw_inb in_8
#define raw_inw in_be16
@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)

int mvme147_hwclk(int op, struct rtc_time *t)
{
#warning check me!
if (!op) {
m147_rtc->ctrl = RTC_READ;
t->tm_year = bcd2int (m147_rtc->bcd_year);

@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
m147_rtc->ctrl = 0;
if (t->tm_year < 70)
t->tm_year += 100;
} else {
/* FIXME Setting the time is not yet supported */
return -EOPNOTSUPP;
}
return 0;
}

@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)

int mvme16x_hwclk(int op, struct rtc_time *t)
{
#warning check me!
if (!op) {
rtc->ctrl = RTC_READ;
t->tm_year = bcd2int (rtc->bcd_year);

@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
rtc->ctrl = 0;
if (t->tm_year < 70)
t->tm_year += 100;
} else {
/* FIXME Setting the time is not yet supported */
return -EOPNOTSUPP;
}
return 0;
}
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);

if (thread_info_flags & _TIF_NOTIFY_RESUME) {
if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}

user_enter();
}

@@ -184,7 +184,7 @@ extern int npmem_ranges;
#include <asm-generic/getorder.h>
#include <asm/pdc.h>

#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))

/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
}
}

#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
EXPORT_SYMBOL(pci_iounmap);
#endif

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);

@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
EXPORT_SYMBOL(pci_iounmap);
@@ -35,7 +35,6 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
	-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
	-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
	-include $(srctree)/include/linux/compiler_attributes.h \
	$(LINUXINCLUDE)

ifdef CONFIG_PPC64_BOOT_WRAPPER

@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
BOOTCFLAGS += -fno-stack-protector
endif

BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)

DTC_FLAGS ?= -p 1024

@@ -12,16 +12,6 @@
# define ASM_CONST(x) __ASM_CONST(x)
#endif

/*
* Inline assembly memory constraint
*
* GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
*
*/
#if defined(GCC_VERSION) && GCC_VERSION < 50000
#define UPD_CONSTR ""
#else
#define UPD_CONSTR "<>"
#endif

#endif /* _ASM_POWERPC_ASM_CONST_H */
@@ -18,6 +18,7 @@
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)

@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

/*
* If system call is called with TM active, set _TIF_RESTOREALL to
* prevent RFSCV being used to return to userspace, because POWER9
* TM implementation has problems with this instruction returning to
* transactional state. Final register values are not relevant because
* the transaction will be aborted upon return anyway. Or in the case
* of unsupported_scv SIGILL fault, the return state does not much
* matter because it's an edge case.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
		unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
	current_thread_info()->flags |= _TIF_RESTOREALL;

/*
* If the system call was made with a transaction active, doom it and
* return without performing the system call. Unless it was an
* unsupported scv vector, in which case it's treated like an illegal
* instruction.
*/
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
		!trap_is_unsupported_scv(regs)) {
	/* Enable TM in the kernel, and disable EE (for scv) */
	hard_irq_disable();
	mtmsr(mfmsr() | MSR_TM);

	/* tabort, this dooms the transaction, nothing else */
	asm volatile(".long 0x7c00071d | ((%0) << 16)"
			:: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));

	/*
	 * Userspace will never see the return value. Execution will
	 * resume after the tbegin. of the aborted transaction with the
	 * checkpointed register state. A context switch could occur
	 * or signal delivered to the process before resuming the
	 * doomed transaction context, but that should all be handled
	 * as expected.
	 */
	return -ENOSYS;
}
#endif // CONFIG_PPC_TRANSACTIONAL_MEM

local_irq_enable();

if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
@@ -12,7 +12,6 @@
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
#include <asm/tm.h>

.section ".toc","aw"
SYS_CALL_TABLE:

@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	SCV_INTERRUPT_TO_KERNEL
	mr r10,r1
	ld r1,PACAKSAVE(r13)

@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr r10,r1
	ld r1,PACAKSAVE(r13)
	std r10,0(r1)

@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
_ASM_NOKPROBE_SYMBOL(tabort_syscall)
	/* Firstly we need to enable TM in the kernel */
	mfmsr r10
	li r9, 1
	rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li r9, MSR_RI
	andc r10, r10, r9
	mtmsrd r10, 1
	mtspr SPRN_SRR0, r11
	mtspr SPRN_SRR1, r12
	RFI_TO_USER
	b . /* prevent speculative execution */
#endif

/*
 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
 * touched, no exit work created, then this can be used.
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
{
int index;
struct machine_check_event evt;
unsigned long msr;

if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;

@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
memcpy(&local_paca->mce_info->mce_event_queue[index],
       &evt, sizeof(evt));

/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
/*
 * Queue irq work to process this event later. Before
 * queuing the work enable translation for non radix LPAR,
 * as irq_work_queue may try to access memory outside RMO
 * region.
 */
if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
	msr = mfmsr();
	mtmsr(msr | MSR_IR | MSR_DR);
	irq_work_queue(&mce_event_process_work);
	mtmsr(msr);
} else {
	irq_work_queue(&mce_event_process_work);
}
}

void mce_common_process_ue(struct pt_regs *regs,
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
do_signal(current);
}

if (thread_info_flags & _TIF_NOTIFY_RESUME) {
if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}

static unsigned long get_tm_stackpointer(struct task_struct *tsk)
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	/* The following code handles the fake_suspend = 1 case */
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -PPC_MIN_STKFRM(r1)
	stdu r1, -TM_FRAME_SIZE(r1)

	/* Turn on TM. */
	mfmsr r8

@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/*
	 * It's possible that treclaim. may modify registers, if we have lost
	 * track of fake-suspend state in the guest due to it using rfscv.
	 * Save and restore registers in case this occurs.
	 */
	mfspr r3, SPRN_DSCR
	mfspr r4, SPRN_XER
	mfspr r5, SPRN_AMR
	/* SPRN_TAR would need to be saved here if the kernel ever used it */
	mfcr r12
	SAVE_NVGPRS(r1)
	SAVE_GPR(2, r1)
	SAVE_GPR(3, r1)
	SAVE_GPR(4, r1)
	SAVE_GPR(5, r1)
	stw r12, 8(r1)
	std r1, HSTATE_HOST_R1(r13)

	/* We have to treclaim here because that's the only way to do S->N */
	li r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	GET_PACA(r13)
	ld r1, HSTATE_HOST_R1(r13)
	REST_GPR(2, r1)
	REST_GPR(3, r1)
	REST_GPR(4, r1)
	REST_GPR(5, r1)
	lwz r12, 8(r1)
	REST_NVGPRS(r1)
	mtspr SPRN_DSCR, r3
	mtspr SPRN_XER, r4
	mtspr SPRN_AMR, r5
	mtcr r12
	HMT_MEDIUM

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since

@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	std r5, VCPU_TFHAR(r9)
	std r6, VCPU_TFIAR(r9)

	addi r1, r1, PPC_MIN_STKFRM
	addi r1, r1, TM_FRAME_SIZE
	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;

/* No chip data for the XICS domain */
/* Let the ICS be the chip data for the XICS domain. For ICS native */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
		NULL, handle_fasteoi_irq, NULL, NULL);
		xics_ics, handle_fasteoi_irq, NULL, NULL);

return 0;
}

@@ -236,7 +236,7 @@ config ARCH_RV32I
config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
@@ -685,16 +685,6 @@ config STACK_GUARD
	The minimum size for the stack guard should be 256 for 31 bit and
	512 for 64 bit.

config WARN_DYNAMIC_STACK
	def_bool n
	prompt "Emit compiler warnings for function with dynamic stack usage"
	help
	  This option enables the compiler option -mwarn-dynamicstack. If the
	  compiler supports this options generates warnings for functions
	  that dynamically allocate stack space using alloca.

	  Say N if you are unsure.

endmenu

menu "I/O subsystem"

@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
endif
endif

ifdef CONFIG_WARN_DYNAMIC_STACK
ifneq ($(call cc-option,-mwarn-dynamicstack),)
KBUILD_CFLAGS += -mwarn-dynamicstack
KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
endif
endif

ifdef CONFIG_EXPOLINE
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_PREEMPT=y
CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y

@@ -503,6 +504,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m

@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y

@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m

@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_DMA_API_DEBUG=y
CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y

@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_STRING_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_TEST_LIVEPATCH=m
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_LSM=y
CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y

@@ -494,6 +495,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m

@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y

@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
			int num_devices, const char *buf);

extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);

extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)

#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
	/* Branch instruction needs 6 bytes */ \
	int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
	REG_SET_SEEN(b1); \
	REG_SET_SEEN(b2); \

@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT4(0xb9080000, dst_reg, src_reg);
	break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
	if (!imm)
		break;
	/* alfi %dst,imm */
	EMIT6_IMM(0xc20b0000, dst_reg, imm);
	if (imm != 0) {
		/* alfi %dst,imm */
		EMIT6_IMM(0xc20b0000, dst_reg, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */

@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT4(0xb9090000, dst_reg, src_reg);
	break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
	if (!imm)
		break;
	/* alfi %dst,-imm */
	EMIT6_IMM(0xc20b0000, dst_reg, -imm);
	if (imm != 0) {
		/* alfi %dst,-imm */
		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
	if (!imm)
		break;
	/* agfi %dst,-imm */
	EMIT6_IMM(0xc2080000, dst_reg, -imm);
	if (imm == -0x80000000) {
		/* algfi %dst,0x80000000 */
		EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
	} else {
		/* agfi %dst,-imm */
		EMIT6_IMM(0xc2080000, dst_reg, -imm);
	}
	break;
/*
 * BPF_MUL
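(Aside, not part of the diff: the new imm == -0x80000000 special case above exists because the BPF immediate is a signed 32-bit value and -INT_MIN is not representable, so "dst - imm" cannot simply be emitted as an agfi with -imm. A tiny standalone C illustration of that arithmetic edge case:)

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = INT32_MIN;   /* the only s32 value whose negation overflows */
	int64_t dst = 5;

	/* What emitting "-imm" as a signed 32-bit immediate would require: */
	printf("-imm fits in s32? %s\n",
	       (-(int64_t)imm <= INT32_MAX) ? "yes" : "no");   /* prints "no" */

	/* What the fixed JIT does instead: add 0x80000000 as an unsigned value. */
	dst += UINT32_C(0x80000000);
	printf("dst = %" PRId64 "\n", dst);
	return 0;
}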
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT4(0xb90c0000, dst_reg, src_reg);
	break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
	if (imm == 1)
		break;
	/* msfi %r5,imm */
	EMIT6_IMM(0xc2010000, dst_reg, imm);
	if (imm != 1) {
		/* msfi %r5,imm */
		EMIT6_IMM(0xc2010000, dst_reg, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */

@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	if (BPF_OP(insn->code) == BPF_MOD)
		/* lhgi %dst,0 */
		EMIT4_IMM(0xa7090000, dst_reg, 0);
	else
		EMIT_ZERO(dst_reg);
	break;
}
/* lhi %w0,0 */

@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT4(0xb9820000, dst_reg, src_reg);
	break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
	if (!imm)
		break;
	/* xilf %dst,imm */
	EMIT6_IMM(0xc0070000, dst_reg, imm);
	if (imm != 0) {
		/* xilf %dst,imm */
		EMIT6_IMM(0xc0070000, dst_reg, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */

@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
	break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
	if (imm == 0)
		break;
	/* sll %dst,imm(%r0) */
	EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
	if (imm != 0) {
		/* sll %dst,imm(%r0) */
		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */

@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
	break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
	if (imm == 0)
		break;
	/* srl %dst,imm(%r0) */
	EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
	if (imm != 0) {
		/* srl %dst,imm(%r0) */
		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */

@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
	break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
	if (imm == 0)
		break;
	/* sra %dst,imm(%r0) */
	EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
	if (imm != 0) {
		/* sra %dst,imm(%r0) */
		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
	}
	EMIT_ZERO(dst_reg);
	break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = find_vma(current->mm, mmio_addr);
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))

@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = find_vma(current->mm, mmio_addr);
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
	$(call if_changed,lzo)

$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
	$(call if_changed,uimage,bzip2)

$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
	$(call if_changed,uimage,gzip)

$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
	$(call if_changed,uimage,lzma)

$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
	$(call if_changed,uimage,xz)

$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
	$(call if_changed,uimage,lzo)

$(obj)/uImage.bin: $(obj)/vmlinux.bin
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
	$(call if_changed,uimage,none)

OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
	$(call if_changed,objcopy)

OBJCOPYFLAGS_uImage.srec := -I binary -O srec
$(obj)/uImage.srec: $(obj)/uImage
$(obj)/uImage.srec: $(obj)/uImage FORCE
	$(call if_changed,objcopy)

$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
	return (pmd_t *)(unsigned long)pud_val(pud);
}

/* only used by the stubbed out hugetlb gup code, should never be called */

@@ -356,7 +356,9 @@ err_nomem:
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
	size = PAGE_ALIGN(size);

	if (!sparc_dma_free_resource(cpu_addr, size))
		return;

	dma_make_coherent(dma_addr, size);

@@ -39,6 +39,7 @@ struct mdesc_hdr {
	u32 node_sz; /* node block size */
	u32 name_sz; /* name block size */
	u32 data_sz; /* data block size */
	char data[];
} __attribute__((aligned(16)));

struct mdesc_elem {

@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
	return (struct mdesc_elem *) mdesc->data;
}

static void *name_block(struct mdesc_hdr *mdesc)

@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
#endif
@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config ARCH_HIBERNATION_POSSIBLE
def_bool y

config ARCH_NR_GPIO
int
default 1024 if X86_64
default 512

config ARCH_SUSPEND_POSSIBLE
def_bool y

@ -2605,7 +2610,6 @@ config PCI_OLPC
config PCI_XEN
def_bool y
depends on PCI && XEN
select SWIOTLB_XEN

config MMCONF_FAM10H
def_bool y

@ -4,6 +4,12 @@

tune = $(call cc-option,-mtune=$(1),$(2))

ifdef CONFIG_CC_IS_CLANG
align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
else
align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
endif

cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586

@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6
# They make zero difference whatsosever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
/*
* IPI implementation on Hyper-V.
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
bool exclude_self)
{
struct hv_send_ipi_ex **arg;
struct hv_send_ipi_ex *ipi_arg;

@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)

if (!cpumask_equal(mask, cpu_present_mask)) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
if (exclude_self)
nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
else
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
if (nr_bank < 0)
goto ipi_mask_ex_done;

@ -138,15 +142,25 @@ ipi_mask_ex_done:
return hv_result_success(status);
}

static bool __send_ipi_mask(const struct cpumask *mask, int vector)
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
bool exclude_self)
{
int cur_cpu, vcpu;
int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;
u64 status;
unsigned int weight;

trace_hyperv_send_ipi_mask(mask, vector);

if (cpumask_empty(mask))
weight = cpumask_weight(mask);

/*
* Do nothing if
* 1. the mask is empty
* 2. the mask only contains self when exclude_self is true
*/
if (weight == 0 ||
(exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true;

if (!hv_hypercall_pg)

@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
ipi_arg.cpu_mask = 0;

for_each_cpu(cur_cpu, mask) {
if (exclude_self && cur_cpu == this_cpu)
continue;
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
return false;

@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
return hv_result_success(status);

do_ex_hypercall:
return __send_ipi_mask_ex(mask, vector);
return __send_ipi_mask_ex(mask, vector, exclude_self);
}

static bool __send_ipi_one(int cpu, int vector)

@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;

if (vp >= 64)
return __send_ipi_mask_ex(cpumask_of(cpu), vector);
return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);

status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
return hv_result_success(status);

@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)

static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
if (!__send_ipi_mask(mask, vector))
if (!__send_ipi_mask(mask, vector, false))
orig_apic.send_IPI_mask(mask, vector);
}

static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int this_cpu = smp_processor_id();
struct cpumask new_mask;
const struct cpumask *local_mask;

cpumask_copy(&new_mask, mask);
cpumask_clear_cpu(this_cpu, &new_mask);
local_mask = &new_mask;
if (!__send_ipi_mask(local_mask, vector))
if (!__send_ipi_mask(mask, vector, true))
orig_apic.send_IPI_mask_allbutself(mask, vector);
}

@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)

static void hv_send_ipi_all(int vector)
{
if (!__send_ipi_mask(cpu_online_mask, vector))
if (!__send_ipi_mask(cpu_online_mask, vector, false))
orig_apic.send_IPI_all(vector);
}
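
In the Hyper-V IPI hunks above, hv_send_ipi_mask_allbutself() no longer copies the cpumask and clears the current CPU; an exclude_self flag is passed down instead, and the sender is either skipped in the per-CPU loop or handled by cpumask_to_vpset_noself() for the extended hypercall. A toy userspace sketch of that shape, with a plain 64-bit bitmask standing in for struct cpumask and made-up function names:

#include <stdbool.h>
#include <stdio.h>

/* Bit i set in mask means CPU i is a target. */
static bool send_to_mask(unsigned long long mask, int vector,
			 bool exclude_self, int this_cpu)
{
	/* Nothing to do if the mask is empty, or only names the sender. */
	if (mask == 0 || (exclude_self && mask == (1ULL << this_cpu)))
		return true;

	for (int cpu = 0; cpu < 64; cpu++) {
		if (!(mask & (1ULL << cpu)))
			continue;
		if (exclude_self && cpu == this_cpu)
			continue;	/* skip the sender instead of copying the mask */
		printf("IPI vector %d -> cpu %d\n", vector, cpu);
	}
	return true;
}

int main(void)
{
	send_to_mask(0xB, 42, true, 0);	/* CPUs 0,1,3 targeted; delivers to 1 and 3 */
	return 0;
}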
@ -2,8 +2,6 @@
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H

#define ARCH_DEFAULT_PKEY 0

/*
* If more than 16 keys are ever supported, a thorough audit
* will be necessary to ensure that the types that store key

@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
{
const struct { char _[64]; } *__src = src;
struct { char _[64]; } __iomem *__dst = dst;
int zf;
bool zf;

/*
* ENQCMDS %(rdx), rax

@ -301,8 +301,8 @@ do { \
unsigned int __gu_low, __gu_high; \
const unsigned int __user *__gu_ptr; \
__gu_ptr = (const void __user *)(ptr); \
__get_user_asm(__gu_low, ptr, "l", "=r", label); \
__get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
} while (0)
#else
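
The 64-bit __get_user path above now reads through the locally declared __gu_ptr, whose type is known, rather than the raw ptr macro argument: ptr + 1 advances by the size of whatever type the caller happened to pass, so the high half could be fetched from the wrong offset. A short illustration of the underlying pointer-arithmetic rule (plain userspace C, not the kernel macro):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned long long v[2] = { 0, 0 };
	unsigned long long *raw = v;			/* what a caller might pass */
	const unsigned int *as_u32 = (const void *)raw;	/* the macro's typed local */

	/* "+ 1" steps by the pointee size, so the two differ: */
	printf("raw + 1    advances %td bytes\n",
	       (const char *)(raw + 1) - (const char *)raw);		/* 8 on LP64 */
	printf("as_u32 + 1 advances %td bytes\n",
	       (const char *)(as_u32 + 1) - (const char *)as_u32);	/* always 4 */
	return 0;
}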
@ -3,14 +3,10 @@
#define _ASM_X86_SWIOTLB_XEN_H

#ifdef CONFIG_SWIOTLB_XEN
extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void);
extern void __init pci_xen_swiotlb_init(void);
extern int pci_xen_swiotlb_init_late(void);
#else
#define xen_swiotlb (0)
static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
static inline void __init pci_xen_swiotlb_init(void) { }
#define pci_xen_swiotlb_detect NULL
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif
@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin

static void kill_me_now(struct callback_head *ch)
{
struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);

p->mce_count = 0;
force_sig(SIGBUS);
}

@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
int ret;

p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);

if (!p->mce_ripv)

@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
}
}

static void queue_task_work(struct mce *m, int kill_current_task)
static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m);
int count = ++current->mce_count;

if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
/* First call, save all the details */
if (count == 1) {
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m);

if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
}

/* Ten is likely overkill. Don't expect more than two faults before task_work() */
if (count > 10)
mce_panic("Too many consecutive machine checks while accessing user data", m, msg);

/* Second or later call, make sure page address matches the one from first call */
if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
mce_panic("Consecutive machine checks to different user pages", m, msg);

/* Do not call task_work_add() more than once */
if (count > 1)
return;

task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}

@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));

queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);

} else {
/*

@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}

if (m.kflags & MCE_IN_KERNEL_COPYIN)
queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
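
queue_task_work() above now tracks how many machine checks the current task has taken before it manages to return to user space: the details are recorded on the first call, the kernel panics if the count keeps growing or if a later fault lands on a different page, and the task work is only armed once. A standalone sketch of that counting pattern, with toy fields in place of the task_struct/mce state:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct task_state {
	int mce_count;		/* consecutive faults before returning to user space */
	unsigned long mce_page;	/* page recorded on the first fault */
	bool work_armed;
};

static void queue_fault_work(struct task_state *t, unsigned long fault_page)
{
	int count = ++t->mce_count;

	if (count == 1)			/* first call: save the details */
		t->mce_page = fault_page;

	if (count > 10) {		/* something is stuck taking faults */
		fprintf(stderr, "too many consecutive faults\n");
		exit(1);
	}

	if (count > 1 && t->mce_page != fault_page) {
		fprintf(stderr, "consecutive faults on different pages\n");
		exit(1);
	}

	if (count > 1)			/* work was already armed on the first call */
		return;

	t->work_armed = true;		/* stand-in for task_work_add() */
}

int main(void)
{
	struct task_state t = { 0 };

	queue_fault_work(&t, 0x1000);
	queue_fault_work(&t, 0x1000);	/* same page again: counted, but not re-armed */
	printf("count=%d armed=%d\n", t.mce_count, (int)t.work_armed);
	return 0;
}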
@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)

x86_init.oem.arch_setup();

/*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
*
* After this point, everything still needed from the boot loader or
* firmware or kernel text should be early reserved or marked not RAM in
* e820. All other memory is free game.
*
* This call needs to happen before e820__memory_setup() which calls the
* xen_memory_setup() on Xen dom0 which relies on the fact that those
* early reservations have happened already.
*/
early_reserve_memory();

iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
e820__memory_setup();
parse_setup_data();

@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)

parse_early_param();

/*
* Do some memory reservations *before* memory is added to
* memblock, so memblock allocations won't overwrite it.
* Do it after early param, so we could get (unlikely) panic from
* serial.
*
* After this point everything still needed from the boot loader or
* firmware or kernel text should be early reserved or marked not
* RAM in e820. All other memory is free game.
*/
early_reserve_memory();

#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)

static void __init pcpu_fc_free(void *ptr, size_t size)
{
memblock_free(__pa(ptr), size);
memblock_free_ptr(ptr, size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)

@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })

#define __peek_nbyte_next(t, insn, n) \
({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })

#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
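
The __get_next()/__peek_nbyte_next() hunk above replaces a cast-and-dereference of the instruction byte stream with memcpy() into a local variable, the usual way to read a value from an arbitrary (possibly misaligned) buffer position without undefined behaviour; for a small fixed size, compilers generally lower the memcpy() to a single load where the hardware allows it. A short sketch of the idiom:

#include <stdio.h>
#include <string.h>

/* Read a 4-byte value from an arbitrary, possibly unaligned, buffer offset. */
static unsigned int get_u32(const unsigned char *buf)
{
	unsigned int v;

	/* *(const unsigned int *)buf would be a misaligned, type-punned access;
	 * memcpy() into a local is well defined and usually compiles to one load. */
	memcpy(&v, buf, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char stream[] = { 0xff, 0x78, 0x56, 0x34, 0x12, 0x00 };

	printf("%#x\n", get_u32(stream + 1));	/* offset 1 is not 4-byte aligned */
	return 0;
}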
@ -710,7 +710,8 @@ oops:

static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int signal, int si_code)
unsigned long address, int signal, int si_code,
u32 pkey)
{
WARN_ON_ONCE(user_mode(regs));

@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,

set_signal_archinfo(address, error_code);

/* XXX: hwpoison faults will set the wrong code. */
force_sig_fault(signal, si_code, (void __user *)address);
if (si_code == SEGV_PKUERR) {
force_sig_pkuerr((void __user *)address, pkey);
} else {
/* XXX: hwpoison faults will set the wrong code. */
force_sig_fault(signal, si_code, (void __user *)address);
}
}

/*

@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;

if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
kernelmode_fixup_or_oops(regs, error_code, address,
SIGSEGV, si_code, pkey);
return;
}

@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
{
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
return;
}

@ -1396,7 +1403,8 @@ good_area:
*/
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
SIGBUS, BUS_ADRERR);
SIGBUS, BUS_ADRERR,
ARCH_DEFAULT_PKEY);
return;
}

@ -1416,7 +1424,8 @@ good_area:
return;

if (fatal_signal_pending(current) && !user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
kernelmode_fixup_or_oops(regs, error_code, address,
0, 0, ARCH_DEFAULT_PKEY);
return;
}

@ -1424,7 +1433,8 @@ good_area:
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
SIGSEGV, SEGV_MAPERR,
ARCH_DEFAULT_PKEY);
return;
}

@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
return 0;

p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
if (!p4d_present(*p4d))
return 0;

pud = pud_offset(p4d, addr);
if (pud_none(*pud))
if (!pud_present(*pud))
return 0;

if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));

pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
if (!pmd_present(*pmd))
return 0;

if (pmd_large(*pmd))
@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
p = early_alloc(PMD_SIZE, nid, false);
if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PMD_SIZE);
memblock_free_ptr(p, PMD_SIZE);
}

p = early_alloc(PAGE_SIZE, nid, true);

@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
p = early_alloc(PUD_SIZE, nid, false);
if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PUD_SIZE);
memblock_free_ptr(p, PUD_SIZE);
}

p = early_alloc(PAGE_SIZE, nid, true);

@ -355,7 +355,7 @@ void __init numa_reset_distance(void)

/* numa_distance could be 1LU marking allocation failure, test cnt */
if (numa_distance_cnt)
memblock_free(__pa(numa_distance), size);
memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL; /* enable table creation */
}
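
Several hunks in this merge (the kasan and NUMA changes here, and the per-CPU and arch_numa ones elsewhere) switch from memblock_free(__pa(ptr), size) to memblock_free_ptr(ptr, size), so callers holding a virtual address no longer translate it at every call site. The toy wrapper below only illustrates that API shape; the fixed offset is a made-up stand-in for __pa(), not how the kernel's translation works:

#include <stdint.h>
#include <stdio.h>

#define TOY_OFFSET 0x40000000ULL	/* pretend phys = virt - TOY_OFFSET */

static void free_range_phys(uint64_t phys, uint64_t size)
{
	printf("free [%#llx, %#llx)\n",
	       (unsigned long long)phys, (unsigned long long)(phys + size));
}

/* Pointer-taking convenience wrapper: the translation happens in one place. */
static void free_range_ptr(void *ptr, uint64_t size)
{
	free_range_phys((uint64_t)(uintptr_t)ptr - TOY_OFFSET, size);
}

int main(void)
{
	void *p = (void *)(uintptr_t)(TOY_OFFSET + 0x1000);

	free_range_phys((uint64_t)(uintptr_t)p - TOY_OFFSET, 0x1000);	/* old style */
	free_range_ptr(p, 0x1000);					/* new style */
	return 0;
}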
@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
}

/* free the copied physical distance table */
if (phys_dist)
memblock_free(__pa(phys_dist), phys_size);
memblock_free_ptr(phys_dist, phys_size);
return;

no_emu:

@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
int err = 0;

start = sanitize_phys(start);
end = sanitize_phys(end);

/*
* The end address passed into this function is exclusive, but
* sanitize_phys() expects an inclusive address.
*/
end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
struct trap_info *traps)
static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
struct trap_info *traps, bool full)
{
unsigned in, out, count;

@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
for (in = out = 0; in < count; in++) {
gate_desc *entry = (gate_desc *)(desc->address) + in;

if (cvt_gate_to_trap(in, entry, &traps[out]))
if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
out++;
}
traps[out].address = 0;

return out;
}

void xen_copy_trap_info(struct trap_info *traps)
{
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

xen_convert_trap_info(desc, traps);
xen_convert_trap_info(desc, traps, true);
}

/* Load a new IDT into Xen. In principle this can be per-CPU, so we

@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
unsigned out;

trace_xen_cpu_load_idt(desc);

@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)

memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

xen_convert_trap_info(desc, traps);
out = xen_convert_trap_info(desc, traps, false);
memset(&traps[out], 0, sizeof(traps[0]));

xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))

@ -1214,6 +1217,11 @@ static void __init xen_dom0_set_legacy_features(void)
x86_platform.legacy.rtc = 1;
}

static void __init xen_domu_set_legacy_features(void)
{
x86_platform.legacy.rtc = 0;
}

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{

@ -1359,6 +1367,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
x86_platform.set_legacy_features =
xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
(void *)((char *)xen_start_info +

@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (pinned) {
struct page *page = pfn_to_page(pfn);

if (static_branch_likely(&xen_struct_pages_ready))
pinned = false;
if (static_branch_likely(&xen_struct_pages_ready)) {
pinned = PagePinned(page);
SetPagePinned(page);
}

xen_mc_batch();

__set_pfn_prot(pfn, PAGE_KERNEL_RO);

if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

xen_mc_issue(PARAVIRT_LAZY_MMU);
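
xen_convert_trap_info() above now returns the number of entries it wrote (and converts every entry when full is set), while xen_load_idt() terminates the array itself with a single memset of the entry past the end. A toy sketch of that convert-and-return-count shape, with made-up names standing in for the Xen helpers:

#include <stdio.h>

/* Stand-in for cvt_gate_to_trap(): "convert" only even values. */
static int convert_one(int val, int *out)
{
	*out = val * 10;
	return val % 2 == 0;
}

/* Write converted entries to out[], return how many were kept. */
static unsigned int convert_all(const int *in, unsigned int count,
				int *out, int keep_all)
{
	unsigned int i, n = 0;

	for (i = 0; i < count; i++)
		if (convert_one(in[i], &out[n]) || keep_all)
			n++;
	return n;
}

int main(void)
{
	int in[] = { 1, 2, 3, 4 }, out[5];
	unsigned int n = convert_all(in, 4, out, 0);

	out[n] = 0;	/* the caller terminates the array, as xen_load_idt() now does */
	printf("kept %u entries, first is %d\n", n, out[0]);
	return 0;
}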
@ -18,7 +18,7 @@
#endif
#include <linux/export.h>

int xen_swiotlb __read_mostly;
static int xen_swiotlb __read_mostly;

/*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary

@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
return xen_swiotlb;
}

void __init pci_xen_swiotlb_init(void)
static void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
xen_swiotlb_init_early();

@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)

gdt = get_cpu_gdt_rw(cpu);

memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

/*
* Bring up the CPU in cpu_bringup_and_idle() with the stack
* pointing just below where pt_regs would be if it were a normal

@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)

xen_copy_trap_info(ctxt->trap_ctxt);

ctxt->ldt_ents = 0;

BUG_ON((unsigned long)gdt & ~PAGE_MASK);

gdt_mfn = arbitrary_virt_to_mfn(gdt);
@ -1466,7 +1466,7 @@ again:
if (!bio_integrity_endio(bio))
return;

if (bio->bi_bdev)
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);

if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {

@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();

ret = blk_iolatency_init(q);
if (ret)
goto err_destroy_all;

ret = blk_ioprio_init(q);
if (ret)
goto err_destroy_all;

@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
if (ret)
goto err_destroy_all;

ret = blk_iolatency_init(q);
if (ret) {
blk_throtl_exit(q);
goto err_destroy_all;
}

return 0;

err_destroy_all:

@ -1364,10 +1366,14 @@ enomem:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;

spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;

@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);

list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;

spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}

spin_unlock_irq(&q->queue_lock);

@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
struct blk_integrity *bi = &disk->queue->integrity;

if (!bi->profile)
return;

/* ensure all bios are off the integrity workqueue */
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);

@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,

spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
if (!rq || !refcount_inc_not_zero(&rq->ref))
if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
block/bsg.c
@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
.llseek = default_llseek,
};

static void bsg_device_release(struct device *dev)
{
struct bsg_device *bd = container_of(dev, struct bsg_device, device);

ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
kfree(bd);
}

void bsg_unregister_queue(struct bsg_device *bd)
{
if (bd->queue->kobj.sd)
sysfs_remove_link(&bd->queue->kobj, "bsg");
cdev_device_del(&bd->cdev, &bd->device);
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
kfree(bd);
put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
if (ret < 0) {
if (ret == -ENOSPC)
dev_err(parent, "bsg: too many bsg devices\n");
goto out_kfree;
kfree(bd);
return ERR_PTR(ret);
}
bd->device.devt = MKDEV(bsg_major, ret);
bd->device.class = bsg_class;
bd->device.parent = parent;
bd->device.release = bsg_device_release;
dev_set_name(&bd->device, "%s", name);
device_initialize(&bd->device);

@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
bd->cdev.owner = THIS_MODULE;
ret = cdev_device_add(&bd->cdev, &bd->device);
if (ret)
goto out_ida_remove;
goto out_put_device;

if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");

@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,

out_device_del:
cdev_device_del(&bd->cdev, &bd->device);
out_ida_remove:
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
out_kfree:
kfree(bd);
out_put_device:
put_device(&bd->device);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
block/fops.c
@ -14,6 +14,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include "blk.h"

static struct inode *bdev_file_inode(struct file *file)

@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
loff_t len)
{
struct block_device *bdev = I_BDEV(bdev_file_inode(file));
struct inode *inode = bdev_file_inode(file);
struct block_device *bdev = I_BDEV(inode);
loff_t end = start + len - 1;
loff_t isize;
int error;

@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;

filemap_invalidate_lock(inode->i_mapping);

/* Invalidate the page cache, including dirty pages. */
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
return error;
goto fail;

switch (mode) {
case FALLOC_FL_ZERO_RANGE:

@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
GFP_KERNEL, 0);
break;
default:
return -EOPNOTSUPP;
error = -EOPNOTSUPP;
}
if (error)
return error;

/*
* Invalidate the page cache again; if someone wandered in and dirtied
* a page, we just discard it - userspace has no way of knowing whether
* the write happened before or after discard completing...
*/
return truncate_bdev_range(bdev, file->f_mode, start, end);
fail:
filemap_invalidate_unlock(inode->i_mapping);
return error;
}

const struct file_operations def_blk_fops = {
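
The blkdev_fallocate() hunks above take the inode's invalidate lock for the whole operation and turn the early return statements into jumps to a single fail: label, so every path drops the lock on the way out. A minimal sketch of that single-exit error-path idiom, with a pthread mutex standing in for the kernel lock and a made-up prepare() step in place of truncate_bdev_range():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical first step; always succeeds in this sketch. */
static int prepare(void)
{
	return 0;
}

static int do_range_op(int mode)
{
	int error;

	pthread_mutex_lock(&range_lock);

	error = prepare();
	if (error)
		goto fail;		/* was: return error; -- would leak the lock */

	if (mode != 0) {
		error = -EOPNOTSUPP;	/* was: return -EOPNOTSUPP; */
		goto fail;
	}

	/* the real work would go here */
	error = 0;
fail:
	pthread_mutex_unlock(&range_lock);	/* every path drops the lock */
	return error;
}

int main(void)
{
	printf("ok=%d unsupported=%d\n", do_range_op(0), do_range_op(1));
	return 0;
}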
@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
#define should_use_kmap(pfn) page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
bool memory)
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
unsigned long pfn;

@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
return NULL;
return (void __iomem __force *)kmap(pfn_to_page(pfn));
} else
return memory ? acpi_os_memmap(pg_off, pg_sz) :
acpi_os_ioremap(pg_off, pg_sz);
return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)

@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
}

/**
* __acpi_os_map_iomem - Get a virtual address for a given physical address range.
* acpi_os_map_iomem - Get a virtual address for a given physical address range.
* @phys: Start of the physical address range to map.
* @size: Size of the physical address range to map.
* @memory: true if remapping memory, false if IO
*
* Look up the given physical address range in the list of existing ACPI memory
* mappings. If found, get a reference to it and return a pointer to it (its

@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
static void __iomem __ref
*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;

@ -356,7 +353,7 @@ static void __iomem __ref

pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
virt = acpi_map(phys, size, memory);
virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);

@ -375,17 +372,11 @@ out:
mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}

void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
return __acpi_os_map_iomem(phys, size, false);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
return (void *)__acpi_os_map_iomem(phys, size, true);
return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer,
binder_size_t failed_at,
bool is_failure)

@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
&proc->alloc, &fd, buffer,
offset, sizeof(fd));
WARN_ON(err);
if (!err)
if (!err) {
binder_deferred_fd_close(fd);
/*
* Need to make sure the thread goes
* back to userspace to complete the
* deferred close
*/
if (thread)
thread->looper_need_return = true;
}
}
} break;
default:

@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead || target_proc->is_frozen) {
return_error = target_thread->is_dead ?
BR_DEAD_REPLY : BR_FROZEN_REPLY;
if (target_thread->is_dead) {
return_error = BR_DEAD_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}

@ -3105,7 +3113,7 @@ err_bad_parent:
err_copy_data_failed:
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer,
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);

@ -3184,7 +3192,9 @@ err_invalid_target_handle:
* Cleanup buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_free_buf(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer)
{
binder_inner_proc_lock(proc);
if (buffer->transaction) {

@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, 0, false);
binder_transaction_buffer_release(proc, thread, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer);
}

@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, buffer);
binder_free_buf(proc, thread, buffer);
break;
}

@ -4107,7 +4117,7 @@ retry:
buffer->transaction = NULL;
binder_cleanup_transaction(t, "fd fixups failed",
BR_FAILED_REPLY);
binder_free_buf(proc, buffer);
binder_free_buf(proc, thread, buffer);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
proc->pid, thread->pid,

@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
struct rb_node *n;
struct binder_thread *thread;

if (proc->outstanding_txns > 0)
return true;

for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
thread = rb_entry(n, struct binder_thread, rb_node);
if (thread->transaction_stack)
return true;
}
return false;
}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{

@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
(!target_proc->outstanding_txns),
msecs_to_jiffies(info->timeout_ms));

if (!ret && target_proc->outstanding_txns)
ret = -EAGAIN;
/* Check pending transactions that wait for reply */
if (ret >= 0) {
binder_inner_proc_lock(target_proc);
if (binder_txns_pending_ilocked(target_proc))
ret = -EAGAIN;
binder_inner_proc_unlock(target_proc);
}

if (ret < 0) {
binder_inner_proc_lock(target_proc);

@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
{
struct binder_proc *target_proc;
bool found = false;
__u32 txns_pending;

info->sync_recv = 0;
info->async_recv = 0;

@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
if (target_proc->pid == info->pid) {
found = true;
binder_inner_proc_lock(target_proc);
info->sync_recv |= target_proc->sync_recv;
txns_pending = binder_txns_pending_ilocked(target_proc);
info->sync_recv |= target_proc->sync_recv |
(txns_pending << 1);
info->async_recv |= target_proc->async_recv;
binder_inner_proc_unlock(target_proc);
}

@ -378,6 +378,8 @@ struct binder_ref {
* binder transactions
* (protected by @inner_lock)
* @sync_recv: process received sync transactions since last frozen
* bit 0: received sync transaction after being frozen
* bit 1: new pending sync transaction during freezing
* (protected by @inner_lock)
* @async_recv: process received async transactions since last frozen
* (protected by @inner_lock)
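
binder_ioctl_get_freezer_info() above starts reporting a second condition through the same sync_recv word, as documented in the binder_internal.h hunk: bit 0 still means a sync transaction arrived after freezing, and bit 1, filled in with (txns_pending << 1), means sync transactions were still pending while freezing. A tiny sketch of that flag packing:

#include <stdio.h>

#define SYNC_RECEIVED	(1u << 0)	/* bit 0: sync transaction received while frozen */
#define TXNS_PENDING	(1u << 1)	/* bit 1: sync transactions still pending */

int main(void)
{
	unsigned int sync_recv = 0;
	unsigned int received = 1, txns_pending = 1;	/* sample state */

	sync_recv |= received | (txns_pending << 1);	/* same shape as the hunk above */

	printf("received=%u pending=%u raw=%#x\n",
	       sync_recv & SYNC_RECEIVED, !!(sync_recv & TXNS_PENDING), sync_recv);
	return 0;
}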
@ -264,7 +264,7 @@ void __init numa_free_distance(void)
size = numa_distance_cnt * numa_distance_cnt *
sizeof(numa_distance[0]);

memblock_free(__pa(numa_distance), size);
memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;
}
@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/init.h>

#include <linux/mc146818rtc.h>

@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;

if (!x86_platform.legacy.rtc)
return;

user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);

@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {

static int __init early_resume_init(void)
{
if (!x86_platform.legacy.rtc)
return 0;

hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;

@ -277,6 +284,9 @@ static int __init late_resume_init(void)
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;

if (!x86_platform.legacy.rtc)
return 0;

user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
to_swnode(fwnode)->managed = true;
set_secondary_fwnode(dev, fwnode);

if (device_is_registered(dev))
software_node_notify(dev);

return 0;
}
EXPORT_SYMBOL_GPL(device_create_managed_software_node);
Some files were not shown because too many files have changed in this diff.