Merge branch 'core/urgent' into x86/asm, to pick up dependency

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar 2017-10-14 10:11:45 +02:00
commit 6edcf57233
480 changed files with 6917 additions and 3019 deletions

View File

@ -39,8 +39,8 @@ up.
Although MT wq wasted a lot of resource, the level of concurrency
provided was unsatisfactory. The limitation was common to both ST and
MT wq albeit less severe on MT. Each wq maintained its own separate
worker pool. A MT wq could provide only one execution context per CPU
while a ST wq one for the whole system. Work items had to compete for
worker pool. An MT wq could provide only one execution context per CPU
while an ST wq one for the whole system. Work items had to compete for
those very limited execution contexts leading to various problems
including proneness to deadlocks around the single execution context.
@ -151,7 +151,7 @@ Application Programming Interface (API)
``alloc_workqueue()`` allocates a wq. The original
``create_*workqueue()`` functions are deprecated and scheduled for
removal. ``alloc_workqueue()`` takes three arguments - @``name``,
removal. ``alloc_workqueue()`` takes three arguments - ``@name``,
``@flags`` and ``@max_active``. ``@name`` is the name of the wq and
also used as the name of the rescuer thread if there is one.
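A minimal usage sketch of the interface described above (the queue name, flags and work function below are illustrative, not taken from this document):

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* illustrative */

static void example_fn(struct work_struct *work)
{
	/* work item body runs in process context on the chosen wq */
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* @name, @flags and @max_active - the three arguments discussed above */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}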
@ -197,7 +197,7 @@ resources, scheduled and executed.
served by worker threads with elevated nice level.
Note that normal and highpri worker-pools don't interact with
each other. Each maintain its separate pool of workers and
each other. Each maintains its separate pool of workers and
implements concurrency management among its workers.
``WQ_CPU_INTENSIVE``
@ -249,8 +249,8 @@ unbound worker-pools and only one work item could be active at any given
time thus achieving the same ordering property as ST wq.
In the current implementation the above configuration only guarantees
ST behavior within a given NUMA node. Instead alloc_ordered_queue should
be used to achieve system wide ST behavior.
ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
be used to achieve system-wide ST behavior.
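A hedged sketch of the system-wide ordered case, using the in-tree alloc_ordered_workqueue() helper (the queue name is illustrative):

static struct workqueue_struct *ordered_wq;	/* illustrative */

static int __init ordered_example_init(void)
{
	/* max_active is forced to 1 and WQ_UNBOUND is implied, so work
	 * items queued here execute one at a time, in queueing order,
	 * across the whole system. */
	ordered_wq = alloc_ordered_workqueue("example_ordered", WQ_MEM_RECLAIM);
	return ordered_wq ? 0 : -ENOMEM;
}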
Example Execution Scenarios

View File

@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats.
index.txt - File index, Mailing list and Links (this document)
intel-pstate.txt - Intel pstate cpufreq driver specific file.
pcc-cpufreq.txt - PCC cpufreq driver specific file.

View File

@ -32,7 +32,7 @@ Example:
compatible = "st,stm32h743-rcc", "st,stm32-rcc";
reg = <0x58024400 0x400>;
#reset-cells = <1>;
#clock-cells = <2>;
#clock-cells = <1>;
clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>;
st,syscfg = <&pwrcfg>;

View File

@ -15,11 +15,14 @@ Required properties
compatible : Must be "ams,as3645a".
reg : The I2C address of the device. Typically 0x30.
#address-cells : 1
#size-cells : 0
Required properties of the "flash" child node
=============================================
Required properties of the flash child node (0)
===============================================
reg: 0
flash-timeout-us: Flash timeout in microseconds. The value must be in
the range [100000, 850000] and divisible by 50000.
flash-max-microamp: Maximum flash current in microamperes. Has to be
@ -33,20 +36,21 @@ ams,input-max-microamp: Maximum flash controller input current. The
and divisible by 50000.
Optional properties of the "flash" child node
=============================================
Optional properties of the flash child node
===========================================
label : The label of the flash LED.
Required properties of the "indicator" child node
=================================================
Required properties of the indicator child node (1)
===================================================
reg: 1
led-max-microamp: Maximum indicator current. The allowed values are
2500, 5000, 7500 and 10000.
Optional properties of the "indicator" child node
=================================================
Optional properties of the indicator child node
===============================================
label : The label of the indicator LED.
@ -55,16 +59,20 @@ Example
=======
as3645a@30 {
#address-cells: 1
#size-cells: 0
reg = <0x30>;
compatible = "ams,as3645a";
flash {
flash@0 {
reg = <0x0>;
flash-timeout-us = <150000>;
flash-max-microamp = <320000>;
led-max-microamp = <60000>;
ams,input-max-microamp = <1750000>;
label = "as3645a:flash";
};
indicator {
indicator@1 {
reg = <0x1>;
led-max-microamp = <10000>;
label = "as3645a:indicator";
};

View File

@ -8,6 +8,12 @@ Required properties:
the firmware event log
- linux,sml-size : size of the memory allocated for the firmware event log
Optional properties:
- powered-while-suspended: present when the TPM is left powered on between
suspend and resume (makes the suspend/resume
callbacks do nothing).
Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
----------------------------------------------------------

View File

@ -41,6 +41,8 @@ Required properties:
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
- "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
- "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
- "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
- "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
- "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
- "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.

View File

@ -3,8 +3,8 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order.
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.
abcn Abracon Corporation
abilis Abilis Systems
abracon Abracon Corporation
actions Actions Semiconductor Co., Ltd.
active-semi Active-Semi International Inc
ad Avionic Design GmbH

View File

@ -196,12 +196,13 @@ struct driver_attribute {
};
Device drivers can export attributes via their sysfs directories.
Drivers can declare attributes using a DRIVER_ATTR macro that works
identically to the DEVICE_ATTR macro.
Drivers can declare attributes using a DRIVER_ATTR_RW and DRIVER_ATTR_RO
macro that works identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO
macros.
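A hedged sketch of the name-based convention (the callback bodies and the debug_flag variable are illustrative): DRIVER_ATTR_RW(debug) expects callbacks named debug_show() and debug_store().

static int debug_flag;	/* illustrative driver-private state */

static ssize_t debug_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d\n", debug_flag);
}

static ssize_t debug_store(struct device_driver *drv, const char *buf,
			   size_t count)
{
	if (kstrtoint(buf, 0, &debug_flag))
		return -EINVAL;
	return count;
}

static DRIVER_ATTR_RW(debug);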
Example:
DRIVER_ATTR(debug,0644,show_debug,store_debug);
DRIVER_ATTR_RW(debug);
This is equivalent to declaring:

View File

@ -366,7 +366,8 @@ struct driver_attribute {
Declaring:
DRIVER_ATTR(_name, _mode, _show, _store)
DRIVER_ATTR_RO(_name)
DRIVER_ATTR_RW(_name)
Creation/Removal:

View File

@ -1680,6 +1680,9 @@ accept_dad - INTEGER
2: Enable DAD, and disable IPv6 operation if MAC-based duplicate
link-local address has been found.
DAD operation and mode on a given interface will be selected according
to the maximum value of conf/{all,interface}/accept_dad.
force_tllao - BOOLEAN
Enable sending the target link-layer address option even when
responding to a unicast neighbor solicitation.
@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER
optimistic_dad - BOOLEAN
Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
0: disabled (default)
1: enabled
0: disabled (default)
1: enabled
Optimistic Duplicate Address Detection for the interface will be enabled
if at least one of conf/{all,interface}/optimistic_dad is set to 1,
it will be disabled otherwise.
use_optimistic - BOOLEAN
If enabled, do not classify optimistic addresses as deprecated during
source address selection. Preferred addresses will still be chosen
before optimistic addresses, subject to other ranking in the source
address selection algorithm.
0: disabled (default)
1: enabled
0: disabled (default)
1: enabled
This will be enabled if at least one of
conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
stable_secret - IPv6 address
This IPv6 address will be used as a secret to generate IPv6

View File

@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups
with SR-IOV or soft switches, such as OVS, are possible.
User-spacetools
User-space tools
userspace|
+-------------------------------------------------------------------+
kernel|Netlink
|
+--------------+-------------------------------+
|Networkstack|
|(Linux)|
||
+----------------------------------------------+
user space |
+-------------------------------------------------------------------+
kernel | Netlink
|
+--------------+-------------------------------+
| Network stack |
| (Linux) |
| |
+----------------------------------------------+
sw1p2 sw1p4 sw1p6
sw1p1 + sw1p3 + sw1p5 + eth1
+|+|+|+
|||||||
+--+----+----+----+-+--+----+---++-----+-----+
|Switchdriver||mgmt|
|(thisdocument)||driver|
||||
+--------------+----------------++-----------+
|
kernel|HWbus(egPCI)
+-------------------------------------------------------------------+
hardware|
+--------------+---+------------+
|Switchdevice (sw1)|
|+----++--------+
||voffloadeddatapath|mgmtport
||||
+--|----|----+----+----+----+---+
||||||
++++++
p1p2p3p4p5p6
sw1p1 + sw1p3 + sw1p5 + eth1
+ | + | + | +
| | | | | | |
+--+----+----+----+----+----+---+ +-----+-----+
| Switch driver | | mgmt |
| (this document) | | driver |
| | | |
+--------------+----------------+ +-----------+
|
kernel | HW bus (eg PCI)
+-------------------------------------------------------------------+
hardware |
+--------------+----------------+
| Switch device (sw1) |
| +----+ +--------+
| | v offloaded data path | mgmt port
| | | |
+--|----|----+----+----+----+---+
| | | | | |
+ + + + + +
p1 p2 p3 p4 p5 p6
front-panelports
front-panel ports
Fig 1.

View File

@ -2865,7 +2865,6 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <ariel.elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
@ -6655,8 +6654,8 @@ M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
@ -8598,6 +8597,12 @@ M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
F: drivers/media/rc/mtk-cir.c
MEDIATEK PMIC LED DRIVER
M: Sean Wang <sean.wang@mediatek.com>
S: Maintained
F: drivers/leds/leds-mt6323.c
F: Documentation/devicetree/bindings/leds/leds-mt6323.txt
MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@openwrt.org>
M: John Crispin <john@phrozen.org>
@ -11059,7 +11064,6 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
$(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
ifdef CONFIG_ORC_UNWINDER
$(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
else
$(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
endif
SKIP_STACK_VALIDATION := 1
export SKIP_STACK_VALIDATION
endif
@ -1172,11 +1176,11 @@ headers_check: headers_install
PHONY += kselftest
kselftest:
$(Q)$(MAKE) -C tools/testing/selftests run_tests
$(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
PHONY += kselftest-clean
kselftest-clean:
$(Q)$(MAKE) -C tools/testing/selftests clean
$(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
PHONY += kselftest-merge
kselftest-merge:

View File

@ -267,15 +267,19 @@
clock-frequency = <400000>;
as3645a@30 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x30>;
compatible = "ams,as3645a";
flash {
flash@0 {
reg = <0x0>;
flash-timeout-us = <150000>;
flash-max-microamp = <320000>;
led-max-microamp = <60000>;
peak-current-limit = <1750000>;
ams,input-max-microamp = <1750000>;
};
indicator {
indicator@1 {
reg = <0x1>;
led-max-microamp = <10000>;
};
};

View File

@ -139,11 +139,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
#define TIF_SYSCALL_TRACE 5 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */
#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@ -154,7 +153,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@ -168,9 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_FSCHECK)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */

View File

@ -70,8 +70,6 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
}
#define segment_eq(a, b) ((a) == (b))

View File

@ -12,6 +12,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
@ -48,12 +49,14 @@ ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK
bne fast_work_pending
tst r1, #_TIF_WORK_MASK
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
@ -76,16 +79,16 @@ ret_fast_syscall:
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK
bne fast_work_pending
tst r1, #_TIF_WORK_MASK
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
fast_work_pending:
#endif
tst r1, #_TIF_SYSCALL_WORK
@ -111,6 +114,9 @@ ENTRY(ret_to_user)
ret_slow_syscall:
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne slow_work_pending

View File

@ -614,10 +614,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
* Update the trace code with the current status.
*/
trace_hardirqs_off();
/* Check valid user FS if needed */
addr_limit_user_check();
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
@ -678,3 +674,9 @@ struct page *get_signal_page(void)
return page;
}
/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
addr_limit_user_check();
}

View File

@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))

View File

@ -384,6 +384,7 @@ ENTRY(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f

View File

@ -751,10 +751,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
*/
trace_hardirqs_off();
/* Check valid user FS if needed */
addr_limit_user_check();
do {
/* Check valid user FS if needed */
addr_limit_user_check();
if (thread_flags & _TIF_NEED_RESCHED) {
schedule();
} else {

View File

@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },

View File

@ -39,7 +39,7 @@ config MICROBLAZE
# Endianness selection
choice
prompt "Endianness selection"
default CPU_BIG_ENDIAN
default CPU_LITTLE_ENDIAN
help
microblaze architectures can be configured for either little or
big endian formats. Be sure to select the appropriate mode.

View File

@ -7,6 +7,7 @@ generic-y += fcntl.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += kvm_para.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h

View File

@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs)
{
#ifdef CONFIG_MMU
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
unsigned long pfn;

View File

@ -257,6 +257,18 @@ config PARISC_PAGE_SIZE_64KB
endchoice
config PARISC_SELF_EXTRACT
bool "Build kernel as self-extracting executable"
default y
help
Say Y if you want to build the parisc kernel as a kind of
self-extracting executable.
If you say N here, the kernel will be compressed with gzip
which can be loaded by the palo bootloader directly too.
If you don't know what to do here, say Y.
config SMP
bool "Symmetric multi-processing support"
---help---

View File

@ -129,8 +129,13 @@ Image: vmlinux
bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
ifdef CONFIG_PARISC_SELF_EXTRACT
vmlinuz: bzImage
$(OBJCOPY) $(boot)/bzImage $@
else
vmlinuz: vmlinux
@gzip -cf -9 $< > $@
endif
install:
$(CONFIG_SHELL) $(src)/arch/parisc/install.sh \

View File

@ -15,7 +15,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs
KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
endif

View File

@ -24,7 +24,8 @@
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
extern __le32 output_len; /* at unaligned address, little-endian */
/* output_len is inserted by the linker possibly at an unaligned address */
extern __le32 output_len __aligned(1);
extern char _text, _end;
extern char _bss, _ebss;
extern char _startcode_end;

View File

@ -280,6 +280,7 @@ void setup_pdc(void); /* in inventory.c */
/* wrapper-functions from pdc.c */
int pdc_add_valid(unsigned long address);
int pdc_instr(unsigned int *instr);
int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
int pdc_chassis_disp(unsigned long disp);
int pdc_chassis_warn(unsigned long *warn);

View File

@ -1,6 +1,7 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
extern int init_per_cpu(int cpuid);
#if defined(CONFIG_SMP)

View File

@ -232,6 +232,26 @@ int pdc_add_valid(unsigned long address)
}
EXPORT_SYMBOL(pdc_add_valid);
/**
* pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler.
* @instr: Pointer to variable which will get instruction opcode.
*
* The return value is PDC_OK (0) in case call succeeded.
*/
int __init pdc_instr(unsigned int *instr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));
convert_to_wide(pdc_result);
*instr = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_chassis_info - Return chassis information.
* @result: The return buffer.

View File

@ -15,6 +15,7 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <linux/initrd.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
@ -216,8 +217,16 @@ void __init pdc_pdt_init(void)
}
for (i = 0; i < pdt_status.pdt_entries; i++) {
unsigned long addr;
report_mem_err(pdt_entry[i]);
addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK;
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
addr >= initrd_start && addr < initrd_end)
pr_crit("CRITICAL: initrd possibly broken "
"due to bad memory!\n");
/* mark memory page bad */
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
}

View File

@ -317,7 +317,7 @@ void __init collect_boot_cpu_data(void)
*
* o Enable CPU profiling hooks.
*/
int init_per_cpu(int cpunum)
int __init init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;

View File

@ -38,6 +38,7 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/start_kernel.h>
#include <asm/processor.h>
#include <asm/sections.h>
@ -48,6 +49,7 @@
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/unwind.h>
#include <asm/smp.h>
static char __initdata command_line[COMMAND_LINE_SIZE];
@ -115,7 +117,6 @@ void __init dma_ops_init(void)
}
#endif
extern int init_per_cpu(int cpuid);
extern void collect_boot_cpu_data(void);
void __init setup_arch(char **cmdline_p)
@ -398,9 +399,8 @@ static int __init parisc_init(void)
}
arch_initcall(parisc_init);
void start_parisc(void)
void __init start_parisc(void)
{
extern void start_kernel(void);
extern void early_trap_init(void);
int ret, cpunum;

View File

@ -255,12 +255,11 @@ void arch_send_call_function_single_ipi(int cpu)
static void __init
smp_cpu_init(int cpunum)
{
extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
/* Set modes and Enable floating point coprocessor */
(void) init_per_cpu(cpunum);
init_per_cpu(cpunum);
disable_sr_hashing();

View File

@ -817,7 +817,7 @@ void __init initialize_ivt(const void *iva)
u32 check = 0;
u32 *ivap;
u32 *hpmcp;
u32 length;
u32 length, instr;
if (strcmp((const char *)iva, "cows can fly"))
panic("IVT invalid");
@ -827,6 +827,14 @@ void __init initialize_ivt(const void *iva)
for (i = 0; i < 8; i++)
*ivap++ = 0;
/*
* Use PDC_INSTR firmware function to get instruction that invokes
* PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
* the PA 1.1 Firmware Architecture document.
*/
if (pdc_instr(&instr) == PDC_OK)
ivap[0] = instr;
/* Compute Checksum for HPMC handler */
length = os_hpmc_size;
ivap[7] = length;

View File

@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/assembly.h>
@ -279,6 +280,17 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
info->prev_sp = sp - 64;
info->prev_ip = 0;
/* The stack is at the end inside the thread_union
* struct. If we reach data, we have reached the
* beginning of the stack and should stop unwinding. */
if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
info->prev_sp < ((unsigned long) task_thread_info(info->t)
+ THREAD_SZ_ALGN)) {
info->prev_sp = 0;
break;
}
if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
break;
info->prev_ip = tmp;

View File

@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/traps.h>
@ -261,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long acc_type;
int fault;
int fault = 0;
unsigned int flags;
if (faulthandler_disabled())
@ -315,7 +316,8 @@ good_area:
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
goto bad_area;
BUG();
}
@ -352,8 +354,7 @@ bad_area:
if (user_mode(regs)) {
struct siginfo si;
show_signal_msg(regs, code, address, tsk, vma);
unsigned int lsb = 0;
switch (code) {
case 15: /* Data TLB miss fault/Data page fault */
@ -386,6 +387,30 @@ bad_area:
si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
break;
}
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
tsk->comm, tsk->pid, address);
si.si_signo = SIGBUS;
si.si_code = BUS_MCEERR_AR;
}
#endif
/*
* Either small page or large page may be poisoned.
* In other words, VM_FAULT_HWPOISON_LARGE and
* VM_FAULT_HWPOISON are mutually exclusive.
*/
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
else if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
else
show_signal_msg(regs, code, address, tsk, vma);
si.si_addr_lsb = lsb;
si.si_errno = 0;
si.si_addr = (void __user *) address;
force_sig_info(si.si_signo, &si, current);

View File

@ -1121,6 +1121,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
li r0, 0x7fff
mtspr SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
@ -1956,9 +1963,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
kvmppc_hdsi:
ld r3, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r3)
cmpwi r0, 0
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
BEGIN_FTR_SECTION
/* Look for DSISR canary. If we find it, retry instruction */
cmpdi r6, 0x7fff
beq 6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpwi r0, 0
bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h

View File

@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init timer_setup(void)
static void __init um_timer_setup(void)
{
int err;
@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts)
void __init time_init(void)
{
timer_set_signal_handler();
late_time_init = timer_setup;
late_time_init = um_timer_setup;
}

View File

@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

View File

@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
{},
};

View File

@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
static struct intel_uncore_type skx_uncore_iio = {
.name = "iio",
.num_counters = 4,
.num_boxes = 5,
.num_boxes = 6,
.perf_ctr_bits = 48,
.event_ctl = SKX_IIO0_MSR_PMON_CTL0,
.perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = {
static struct intel_uncore_type skx_uncore_irp = {
.name = "irp",
.num_counters = 2,
.num_boxes = 5,
.num_boxes = 6,
.perf_ctr_bits = 48,
.event_ctl = SKX_IRP0_MSR_PMON_CTL0,
.perf_ctr = SKX_IRP0_MSR_PMON_CTR0,

View File

@ -63,6 +63,14 @@ static bool test_intel(int idx)
case INTEL_FAM6_ATOM_SILVERMONT1:
case INTEL_FAM6_ATOM_SILVERMONT2:
case INTEL_FAM6_ATOM_AIRMONT:
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_DENVERTON:
case INTEL_FAM6_ATOM_GEMINI_LAKE:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
if (idx == PERF_MSR_SMI)
return true;
break;

View File

@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
if (fpu->fpstate_active) {
if (fpu->initialized) {
unsigned long fx_aligned, math_size;
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);

View File

@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
output, input...) \
{ \
register void *__sp asm(_ASM_SP); \
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2) \
: output, "+r" (__sp) \
: output, ASM_CALL_CONSTRAINT \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input); \
}

View File

@ -11,10 +11,12 @@
# define __ASM_FORM_COMMA(x) " " #x ","
#endif
#ifdef CONFIG_X86_32
#ifndef __x86_64__
/* 32 bit */
# define __ASM_SEL(a,b) __ASM_FORM(a)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
/* 64 bit */
# define __ASM_SEL(a,b) __ASM_FORM(b)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
@ -132,4 +134,15 @@
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
#ifndef __ASSEMBLY__
/*
* This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer
* gets set up by the containing function. If you forget to do this, objtool
* may print a "call without frame pointer save/setup" warning.
*/
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif
#endif /* _ASM_X86_ASM_H */
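A hedged usage sketch (the helper name is hypothetical): the constraint goes in the output list of any inline asm that issues a call, mirroring the conversions in the hunks below.

#include <asm/asm.h>

extern void example_helper(void);	/* hypothetical */

static inline void call_example_helper(void)
{
	/*
	 * ASM_CALL_CONSTRAINT ties the asm to the stack pointer so the
	 * compiler places it after the frame pointer has been set up.
	 */
	asm volatile("call example_helper" : ASM_CALL_CONSTRAINT : : "memory");
}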

View File

@ -23,11 +23,9 @@
/*
* High level FPU state handling functions:
*/
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
err; \
})
#define check_insn(insn, output, input...) \
({ \
int err; \
#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
})
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
: output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
int err;
if (IS_ENABLED(CONFIG_X86_32)) {
err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
}
}
/* Copying from a kernel buffer to FPU registers should never fail: */
WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
WARN_ON_FPU(err);
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
* XSAVE area format.
*/
#define XSTATE_XRESTORE(st, lmask, hmask, err) \
#define XSTATE_XRESTORE(st, lmask, hmask) \
asm volatile(ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
".pushsection .fixup,\"ax\"\n" \
"4: movl $-2, %[err]\n" \
"jmp 3b\n" \
".popsection\n" \
_ASM_EXTABLE(661b, 4b) \
: [err] "=r" (err) \
_ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
: \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
/*
* We should never fault when copying from a kernel buffer, and the FPU
* state we set at boot time should be valid.
*/
WARN_ON_FPU(err);
}
@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
u32 hmask = mask >> 32;
int err;
WARN_ON(!alternatives_patched);
WARN_ON_FPU(!alternatives_patched);
XSTATE_XSAVE(xstate, lmask, hmask, err);
@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err;
XSTATE_XRESTORE(xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
WARN_ON_FPU(err);
XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
*/
static inline void fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
/*
* The question "does this thread have fpu access?"
* is slightly racy, since preemption could come in
* and revoke it immediately after the test.
*
* However, even in that very unlikely scenario,
* we can just assume we have FPU access - typically
* to save the FP state - we'll just take a #NM
* fault and get the FPU access back.
*/
static inline int fpregs_active(void)
{
return current->thread.fpu.fpregs_active;
}
/*
* FPU state switching for scheduling.
*
@ -571,14 +527,13 @@ static inline int fpregs_active(void)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (old_fpu->fpregs_active) {
if (old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
new_fpu->fpstate_active;
new_fpu->initialized;
if (preload) {
if (!fpregs_state_valid(new_fpu, cpu))
@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
struct fpu *fpu = &current->thread.fpu;
preempt_disable();
if (!fpregs_active())
fpregs_activate(fpu);
fpregs_activate(fpu);
preempt_enable();
}

View File

@ -68,6 +68,9 @@ struct fxregs_state {
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
/*
* Software based FPU emulation state. This is arbitrary really,
* it matches the x87 format to make it easier to understand:
@ -290,36 +293,13 @@ struct fpu {
unsigned int last_cpu;
/*
* @fpstate_active:
* @initialized:
*
* This flag indicates whether this context is active: if the task
* This flag indicates whether this context is initialized: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
unsigned char fpstate_active;
/*
* @fpregs_active:
*
* This flag determines whether a given context is actively
* loaded into the FPU's registers and that those registers
* represent the task's current FPU state.
*
* Note the interaction with fpstate_active:
*
* # task does not use the FPU:
* fpstate_active == 0
*
* # task uses the FPU and regs are active:
* fpstate_active == 1 && fpregs_active == 1
*
* # the regs are inactive but still match fpstate:
* fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
*
* The third state is what we use for the lazy restore optimization
* on lazy-switching CPUs.
*/
unsigned char fpregs_active;
unsigned char initialized;
/*
* @state:

View File

@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void);
int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
void __user *ubuf, struct xregs_state *xsave);
int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
extern int validate_xstate_header(const struct xstate_header *hdr);
#endif

View File

@ -286,6 +286,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
return __pkru_allows_pkey(vma_pkey(vma), write);
}
/*
* If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
* bits. This serves two purposes. It prevents a nasty situation in
* which PCID-unaware code saves CR3, loads some other value (with PCID
* == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
* the saved ASID was nonzero. It also means that any bugs involving
* loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
* deterministically.
*/
static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
if (static_cpu_has(X86_FEATURE_PCID)) {
VM_WARN_ON_ONCE(asid > 4094);
return __sme_pa(mm->pgd) | (asid + 1);
} else {
VM_WARN_ON_ONCE(asid != 0);
return __sme_pa(mm->pgd);
}
}
static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
VM_WARN_ON_ONCE(asid > 4094);
return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
/*
* This can be used from process context to figure out what the value of
@ -296,10 +322,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
*/
static inline unsigned long __get_current_cr3_fast(void)
{
unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
if (static_cpu_has(X86_FEATURE_PCID))
cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
this_cpu_read(cpu_tlbstate.loaded_mm_asid));
/* For now, be very restrictive about when this can be called. */
VM_WARN_ON(in_nmi() || preemptible());

View File

@ -179,7 +179,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
u64 input_address = input ? virt_to_phys(input) : 0;
u64 output_address = output ? virt_to_phys(output) : 0;
u64 hv_status;
register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
if (!hv_hypercall_pg)
@ -187,7 +186,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
__asm__ __volatile__("mov %4, %%r8\n"
"call *%5"
: "=a" (hv_status), "+r" (__sp),
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input_address)
: "r" (output_address), "m" (hv_hypercall_pg)
: "cc", "memory", "r8", "r9", "r10", "r11");
@ -202,7 +201,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
__asm__ __volatile__("call *%7"
: "=A" (hv_status),
"+c" (input_address_lo), "+r" (__sp)
"+c" (input_address_lo), ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input_address_hi),
"D"(output_address_hi), "S"(output_address_lo),
@ -224,12 +223,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
{
__asm__ __volatile__("call *%4"
: "=a" (hv_status), "+r" (__sp),
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
: "m" (hv_hypercall_pg)
: "cc", "r8", "r9", "r10", "r11");
@ -242,7 +240,7 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
__asm__ __volatile__ ("call *%5"
: "=A"(hv_status),
"+c"(input1_lo),
"+r"(__sp)
ASM_CALL_CONSTRAINT
: "A" (control),
"b" (input1_hi),
"m" (hv_hypercall_pg)

View File

@ -459,8 +459,8 @@ int paravirt_disable_iospace(void);
*/
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS \
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
register void *__sp asm("esp")
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
@ -480,8 +480,8 @@ int paravirt_disable_iospace(void);
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx, __eax = __eax; \
register void *__sp asm("rsp")
__edx = __edx, __ecx = __ecx, __eax = __eax;
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
@ -532,7 +532,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, "+r" (__sp) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
@ -542,7 +542,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, "+r" (__sp) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
@ -569,7 +569,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, "+r" (__sp) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \

View File

@ -100,19 +100,14 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() \
({ \
register void *__sp asm(_ASM_SP); \
asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
})
# define __preempt_schedule() \
asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() \
({ \
register void *__sp asm(_ASM_SP); \
asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
})
# define __preempt_schedule_notrace() \
asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule_notrace(void);
#endif

View File

@ -677,8 +677,6 @@ static inline void sync_core(void)
* Like all of Linux's memory ordering operations, this is a
* compiler barrier as well.
*/
register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_32
asm volatile (
"pushfl\n\t"
@ -686,7 +684,7 @@ static inline void sync_core(void)
"pushl $1f\n\t"
"iret\n\t"
"1:"
: "+r" (__sp) : : "memory");
: ASM_CALL_CONSTRAINT : : "memory");
#else
unsigned int tmp;
@ -703,7 +701,7 @@ static inline void sync_core(void)
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}

View File

@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
({ \
long tmp; \
struct rw_semaphore* ret; \
register void *__sp asm(_ASM_SP); \
\
asm volatile("# beginning down_write\n\t" \
LOCK_PREFIX " xadd %1,(%4)\n\t" \
@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
: "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
: "+m" (sem->count), "=d" (tmp), \
"=a" (ret), ASM_CALL_CONSTRAINT \
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \

View File

@ -158,17 +158,6 @@ struct thread_info {
*/
#ifndef __ASSEMBLY__
static inline unsigned long current_stack_pointer(void)
{
unsigned long sp;
#ifdef CONFIG_X86_64
asm("mov %%rsp,%0" : "=g" (sp));
#else
asm("mov %%esp,%0" : "=g" (sp));
#endif
return sp;
}
/*
* Walks up the stack frames to make sure that the specified object is
* entirely contained by a single stack frame.

View File

@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
TP_STRUCT__entry(
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
__field(bool, initialized)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
TP_fast_assign(
__entry->fpu = fpu;
__entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
__entry->initialized = fpu->initialized;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
__entry->initialized,
__entry->xfeatures,
__entry->xcomp_bv
)

View File

@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
register void *__sp asm(_ASM_SP); \
__chk_user_ptr(ptr); \
might_fault(); \
asm volatile("call __get_user_%P4" \
: "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
@ -337,7 +337,7 @@ do { \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (retval), "=&A"(x) \
: "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
: "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
})

View File

@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[];
register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
register void *__sp asm(_ASM_SP);
register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp)
#define __HYPERCALL_0PARAM "=r" (__res), ASM_CALL_CONSTRAINT
#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
@ -552,13 +551,13 @@ static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
u32 *p = (u32 *) &desc;
mcl->op = __HYPERVISOR_update_descriptor;
if (sizeof(maddr) == sizeof(long)) {
mcl->args[0] = maddr;
mcl->args[1] = *(unsigned long *)&desc;
} else {
u32 *p = (u32 *)&desc;
mcl->args[0] = maddr;
mcl->args[1] = maddr >> 32;
mcl->args[2] = *p++;

View File

@ -21,14 +21,6 @@
void __init check_bugs(void)
{
#ifdef CONFIG_X86_32
/*
* Regardless of whether PCID is enumerated, the SDM says
* that it can't be enabled in 32-bit mode.
*/
setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif
identify_boot_cpu();
if (!IS_ENABLED(CONFIG_SMP)) {

View File

@ -904,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
fpu__init_system(c);
#ifdef CONFIG_X86_32
/*
* Regardless of whether PCID is enumerated, the SDM says
* that it can't be enabled in 32-bit mode.
*/
setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif
}
void __init early_cpu_init(void)

View File

@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
kernel_fpu_disable();
if (fpu->fpregs_active) {
if (fpu->initialized) {
/*
* Ignore return value -- we don't care if reg state
* is clobbered.
@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
{
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active)
if (fpu->initialized)
copy_kernel_to_fpregs(&fpu->state);
kernel_fpu_enable();
@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
preempt_disable();
trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (fpu->initialized) {
if (!copy_fpregs_to_fpstate(fpu)) {
copy_kernel_to_fpregs(&fpu->state);
}
@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu);
@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
/*
* Save current FPU registers directly into the child
* FPU context, without any memory-to-memory copying.
* In lazy mode, if the FPU context isn't loaded into
* fpregs, CR0.TS will be set and do_device_not_available
* will load the FPU context.
*
* We have to do all this with preemption disabled,
* mostly because of the FNSAVE case, because in that
* case we must not allow preemption in the window
* between the FNSAVE and us marking the context lazy.
*
* It shouldn't be an issue as even FNSAVE is plenty
* fast in terms of critical section length.
* ( The function 'fails' in the FNSAVE case, which destroys
* register contents so we have to copy them back. )
*/
preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state,
fpu_kernel_xstate_size);
memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Activate the current task's in-memory FPU context,
* if it has not been used before:
*/
void fpu__activate_curr(struct fpu *fpu)
void fpu__initialize(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
if (!fpu->fpstate_active) {
if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for the current task: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
EXPORT_SYMBOL_GPL(fpu__initialize);
/*
* This function must be called before we read a task's fpstate.
*
* If the task has not used the FPU before then initialize its
* fpstate.
* There's two cases where this gets called:
*
* - for the current task (when coredumping), in which case we have
* to save the latest FPU registers into the fpstate,
*
* - or it's called for stopped tasks (ptrace), in which case the
* registers were already saved by the context-switch code when
* the task scheduled out - we only have to initialize the registers
* if they've never been initialized.
*
* If the task has used the FPU before then save it.
*/
void fpu__activate_fpstate_read(struct fpu *fpu)
void fpu__prepare_read(struct fpu *fpu)
{
/*
* If fpregs are active (in the current CPU), then
* copy them to the fpstate:
*/
if (fpu->fpregs_active) {
if (fpu == &current->thread.fpu) {
fpu__save(fpu);
} else {
if (!fpu->fpstate_active) {
if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for current and for stopped child tasks: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
}
@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
/*
* This function must be called before we write a task's fpstate.
*
* If the task has used the FPU before then unlazy it.
* If the task has used the FPU before then invalidate any cached FPU registers.
* If the task has not used the FPU before then initialize its fpstate.
*
* After this function call, after registers in the fpstate are
* modified and the child task has woken up, the child task will
* restore the modified FPU state from the modified context. If we
* didn't clear its lazy status here then the lazy in-registers
* didn't clear its cached status here then the cached in-registers
* state pending on its former CPU could be restored, corrupting
* the modifications.
*/
void fpu__activate_fpstate_write(struct fpu *fpu)
void fpu__prepare_write(struct fpu *fpu)
{
/*
* Only stopped child tasks can be used to modify the FPU
@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
*/
WARN_ON_FPU(fpu == &current->thread.fpu);
if (fpu->fpstate_active) {
/* Invalidate any lazy state: */
if (fpu->initialized) {
/* Invalidate any cached state: */
__fpu_invalidate_fpregs_state(fpu);
} else {
fpstate_init(&fpu->state);
@ -310,73 +300,10 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
trace_x86_fpu_activate_state(fpu);
/* Safe to do for stopped child tasks: */
fpu->fpstate_active = 1;
fpu->initialized = 1;
}
}
/*
* This function must be called before we write the current
* task's fpstate.
*
* This call gets the current FPU register state and moves
* it in to the 'fpstate'. Preemption is disabled so that
* no writes to the 'fpstate' can occur from context
* swiches.
*
* Must be followed by a fpu__current_fpstate_write_end().
*/
void fpu__current_fpstate_write_begin(void)
{
struct fpu *fpu = &current->thread.fpu;
/*
* Ensure that the context-switching code does not write
* over the fpstate while we are doing our update.
*/
preempt_disable();
/*
* Move the fpregs in to the fpu's 'fpstate'.
*/
fpu__activate_fpstate_read(fpu);
/*
* The caller is about to write to 'fpu'. Ensure that no
* CPU thinks that its fpregs match the fpstate. This
* ensures we will not be lazy and skip a XRSTOR in the
* future.
*/
__fpu_invalidate_fpregs_state(fpu);
}
/*
* This function must be paired with fpu__current_fpstate_write_begin()
*
* This will ensure that the modified fpstate gets placed back in
* the fpregs if necessary.
*
* Note: This function may be called whether or not an _actual_
* write to the fpstate occurred.
*/
void fpu__current_fpstate_write_end(void)
{
struct fpu *fpu = &current->thread.fpu;
/*
* 'fpu' now has an updated copy of the state, but the
* registers may still be out of date. Update them with
* an XRSTOR if they are active.
*/
if (fpregs_active())
copy_kernel_to_fpregs(&fpu->state);
/*
* Our update is done and the fpregs/fpstate are in sync
* if necessary. Context switches can happen again.
*/
preempt_enable();
}
/*
* 'fpu__restore()' is called to copy FPU registers from
* the FPU fpstate to the live hw registers and to activate
@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void)
*/
void fpu__restore(struct fpu *fpu)
{
fpu__activate_curr(fpu);
fpu__initialize(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu)
{
preempt_disable();
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
if (fpu == &current->thread.fpu) {
if (fpu->initialized) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
}
}
fpu->fpstate_active = 0;
fpu->initialized = 0;
trace_x86_fpu_dropped(fpu);
@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu)
* Make sure fpstate is cleared and initialized.
*/
if (static_cpu_has(X86_FEATURE_FPU)) {
fpu__activate_curr(fpu);
preempt_disable();
fpu__initialize(fpu);
user_fpu_begin();
copy_init_fpstate_to_fpregs();
preempt_enable();
}
}

View File

@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
WARN_ON_FPU(current->thread.fpu.initialized);
}
/*

View File

@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r
{
struct fpu *target_fpu = &target->thread.fpu;
return target_fpu->fpstate_active ? regset->n : 0;
return target_fpu->initialized ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
return regset->n;
else
return 0;
@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
fpstate_sanitize_xstate(fpu);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
xsave = &fpu->state.xsave;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
if (using_compacted_format()) {
ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
if (kbuf)
ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
else
ret = copy_xstate_to_user(ubuf, xsave, pos, count);
} else {
fpstate_sanitize_xstate(fpu);
/*
@ -129,12 +132,23 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
xsave = &fpu->state.xsave;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
if (boot_cpu_has(X86_FEATURE_XSAVES))
ret = copyin_to_xsaves(kbuf, ubuf, xsave);
else
if (using_compacted_format()) {
if (kbuf)
ret = copy_kernel_to_xstate(xsave, kbuf);
else
ret = copy_user_to_xstate(xsave, ubuf);
} else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
if (!ret)
ret = validate_xstate_header(&xsave->header);
}
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
/*
* In case of failure, mark all states as init:
@ -142,16 +156,6 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
fpstate_init(&fpu->state);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->header.xfeatures &= xfeatures_mask;
/*
* These bits must be zero.
*/
memset(&xsave->header.reserved, 0, 48);
return ret;
}
@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
fpu__activate_fpstate_read(fpu);
fpu__prepare_read(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
struct user_i387_ia32_struct env;
int ret;
fpu__activate_fpstate_write(fpu);
fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
fpvalid = fpu->fpstate_active;
fpvalid = fpu->initialized;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),

View File

@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
*/
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
struct xregs_state *xsave = &current->thread.fpu.state.xsave;
struct fpu *fpu = &current->thread.fpu;
struct xregs_state *xsave = &fpu->state.xsave;
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
if (fpregs_active() || using_compacted_format()) {
if (fpu->initialized || using_compacted_format()) {
/* Save the live register state to the user directly. */
if (copy_fpregs_to_sigframe(buf_fx))
return -1;
/* Update the thread's fxstate to save the fsave header. */
if (ia32_fxstate)
copy_fxregs_to_kernel(&tsk->thread.fpu);
copy_fxregs_to_kernel(fpu);
} else {
/*
 * It is a *bug* if the kernel uses compacted-format for xsave
@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
return -1;
}
fpstate_sanitize_xstate(&tsk->thread.fpu);
fpstate_sanitize_xstate(fpu);
if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
return -1;
}
@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk,
struct xstate_header *header = &xsave->header;
if (use_xsave()) {
/* These bits must be zero. */
memset(header->reserved, 0, 48);
/*
* Note: we don't need to zero the reserved bits in the
* xstate_header here because we either didn't copy them at all,
* or we checked earlier that they aren't set.
*/
/*
* Init the state that is not present in the memory
@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
if (fx_only)
header->xfeatures = XFEATURE_MASK_FPSSE;
else
header->xfeatures &= (xfeatures_mask & xfeatures);
header->xfeatures &= xfeatures;
}
if (use_fxsr()) {
@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
/*
* For 32-bit frames with fxstate, copy the user state to the
* thread's fpu state, reconstruct fxstate from the fsave
* header. Sanitize the copied state etc.
* header. Validate and sanitize the copied state.
*/
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
/*
* Drop the current fpu which clears fpu->fpstate_active. This ensures
* Drop the current fpu which clears fpu->initialized. This ensures
 * that a context switch during the copy of the new state cannot
 * restore or save the intermediate state, which would corrupt
 * the freshly restored state.
* We will be ready to restore/save the state only after
* fpu->fpstate_active is again set.
* fpu->initialized is again set.
*/
fpu__drop(fpu);
if (using_compacted_format()) {
err = copyin_to_xsaves(NULL, buf_fx,
&fpu->state.xsave);
err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
} else {
err = __copy_from_user(&fpu->state.xsave,
buf_fx, state_size);
err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
if (!err && state_size > offsetof(struct xregs_state, header))
err = validate_xstate_header(&fpu->state.xsave.header);
}
if (err || __copy_from_user(&env, buf, sizeof(env))) {
@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}
fpu->fpstate_active = 1;
fpu->initialized = 1;
preempt_disable();
fpu__restore(fpu);
preempt_enable();

View File

@ -483,6 +483,30 @@ int using_compacted_format(void)
return boot_cpu_has(X86_FEATURE_XSAVES);
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_xstate_header(const struct xstate_header *hdr)
{
/* No unknown or supervisor features may be set */
if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
return -EINVAL;
/* Userspace must use the uncompacted format */
if (hdr->xcomp_bv)
return -EINVAL;
/*
* If 'reserved' is shrunken to add a new field, make sure to validate
* that new field here!
*/
BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
/* No reserved bits may be set */
if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
return -EINVAL;
return 0;
}
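For readers tracing the new validation path, here is a minimal, self-contained userspace sketch of the same three checks (no unknown or supervisor features, uncompacted format only, reserved bytes all zero). The structure layout, feature masks and helper names below are simplified stand-ins, not the kernel definitions:

/* Illustrative only: simplified stand-ins for the kernel's xstate_header
 * and xfeatures_mask; the real definitions live in the x86 FPU headers. */
#include <stdint.h>
#include <stdio.h>

struct fake_xstate_header {
	uint64_t xfeatures;
	uint64_t xcomp_bv;
	uint8_t  reserved[48];
};

#define FAKE_XFEATURES_MASK	0x1fULL		/* features this "kernel" enabled */
#define FAKE_SUPERVISOR_MASK	0x100ULL	/* pretend bit 8 is supervisor-only */

static int fake_validate_xstate_header(const struct fake_xstate_header *hdr)
{
	unsigned int i;

	/* No unknown or supervisor features may be set */
	if (hdr->xfeatures & (~FAKE_XFEATURES_MASK | FAKE_SUPERVISOR_MASK))
		return -1;

	/* Userspace must use the uncompacted format */
	if (hdr->xcomp_bv)
		return -1;

	/* No reserved bits may be set */
	for (i = 0; i < sizeof(hdr->reserved); i++)
		if (hdr->reserved[i])
			return -1;

	return 0;
}

int main(void)
{
	struct fake_xstate_header good = { .xfeatures = 0x3 };
	struct fake_xstate_header bad  = { .xfeatures = 0x3,
					   .xcomp_bv  = 0x8000000000000003ULL };

	/* expect "good: 0, bad: -1" - the compacted xcomp_bv is rejected */
	printf("good: %d, bad: %d\n",
	       fake_validate_xstate_header(&good),
	       fake_validate_xstate_header(&bad));
	return 0;
}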
static void __xstate_dump_leaves(void)
{
int i;
@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state)
{
struct fpu *fpu = &current->thread.fpu;
if (!fpu->fpstate_active)
if (!fpu->initialized)
return NULL;
/*
* fpu__save() takes the CPU's xstate registers
@ -920,48 +944,55 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
/*
* Weird legacy quirk: SSE and YMM states store information in the
* MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
* area is marked as unused in the xfeatures header, we need to copy
* MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
*/
static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
{
if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
return false;
if (xfeatures & XFEATURE_MASK_FP)
return false;
return true;
}
/*
* This is similar to user_regset_copyout(), but will not add offset to
* the source data pointer or increment pos, count, kbuf, and ubuf.
*/
static inline int xstate_copyout(unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf,
const void *data, const int start_pos,
const int end_pos)
static inline void
__copy_xstate_to_kernel(void *kbuf, const void *data,
unsigned int offset, unsigned int size, unsigned int size_total)
{
if ((count == 0) || (pos < start_pos))
return 0;
if (offset < size_total) {
unsigned int copy = min(size, size_total - offset);
if (end_pos < 0 || pos < end_pos) {
unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
if (kbuf) {
memcpy(kbuf + pos, data, copy);
} else {
if (__copy_to_user(ubuf + pos, data, copy))
return -EFAULT;
}
memcpy(kbuf + offset, data, copy);
}
return 0;
}
/*
* Convert from kernel XSAVES compacted format to standard format and copy
* to a ptrace buffer. It supports partial copy but pos always starts from
* zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES.
* to a kernel-space ptrace buffer.
*
* It supports partial copy but pos always starts from zero. This is called
* from xstateregs_get() and there we check the CPU has XSAVES.
*/
int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
void __user *ubuf, struct xregs_state *xsave)
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
unsigned int offset, size;
int ret, i;
struct xstate_header header;
int i;
/*
* Currently copy_regset_to_user() starts from pos 0:
*/
if (unlikely(pos != 0))
if (unlikely(offset_start != 0))
return -EFAULT;
/*
@ -977,8 +1008,91 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
offset = offsetof(struct xregs_state, header);
size = sizeof(header);
ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
__copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
for (i = 0; i < XFEATURE_MAX; i++) {
/*
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
void *src = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
/* The next component has to fit fully into the output buffer: */
if (offset + size > size_total)
break;
__copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
}
}
if (xfeatures_mxcsr_quirk(header.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
}
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
*/
offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes);
__copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
return 0;
}
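The size_total handling above is the subtle part: a component is copied only if its standard-format offset still lies inside the destination, and it is truncated at the buffer end. Below is a small, standalone sketch of that clamping rule; the function name and buffers are hypothetical, not the kernel helper itself:

/* Illustrative sketch of the "copy only what still fits" rule used by
 * __copy_xstate_to_kernel(); names and buffers are made up. */
#include <stdio.h>
#include <string.h>

static void copy_clamped(char *dst, const char *src,
			 unsigned int offset, unsigned int size,
			 unsigned int size_total)
{
	if (offset < size_total) {
		unsigned int copy = size;

		if (copy > size_total - offset)
			copy = size_total - offset;
		memcpy(dst + offset, src, copy);
	}
	/* Components starting at or beyond size_total are silently skipped,
	 * which is why copy_xstate_to_kernel() can stop at the first
	 * component that no longer fits fully. */
}

int main(void)
{
	char dst[8] = "........";

	copy_clamped(dst, "AAAA", 2, 4, 8);	/* fits entirely            */
	copy_clamped(dst, "BBBB", 6, 4, 8);	/* clamped to 2 bytes       */
	copy_clamped(dst, "CCCC", 9, 4, 8);	/* skipped: starts past end */
	printf("%.8s\n", dst);			/* prints ..AAAABB          */
	return 0;
}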
static inline int
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
{
if (!size)
return 0;
if (offset < size_total) {
unsigned int copy = min(size, size_total - offset);
if (__copy_to_user(ubuf + offset, data, copy))
return -EFAULT;
}
return 0;
}
/*
* Convert from kernel XSAVES compacted format to standard format and copy
* to a user-space buffer. It supports partial copy but pos always starts from
* zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES.
*/
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
unsigned int offset, size;
int ret, i;
struct xstate_header header;
/*
* Currently copy_regset_to_user() starts from pos 0:
*/
if (unlikely(offset_start != 0))
return -EFAULT;
/*
* The destination is a ptrace buffer; we put in only user xstates:
*/
memset(&header, 0, sizeof(header));
header.xfeatures = xsave->header.xfeatures;
header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
/*
* Copy xregs_state->header:
*/
offset = offsetof(struct xregs_state, header);
size = sizeof(header);
ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
if (ret)
return ret;
@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
offset = xstate_offsets[i];
size = xstate_sizes[i];
ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
/* The next component has to fit fully into the output buffer: */
if (offset + size > size_total)
break;
ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
if (ret)
return ret;
if (offset + size >= count)
break;
}
}
if (xfeatures_mxcsr_quirk(header.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
__copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
}
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
*/
offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes);
ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
if (ret)
return ret;
@ -1018,55 +1137,42 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
}
/*
* Convert from a ptrace standard-format buffer to kernel XSAVES format
* and copy to the target thread. This is called from xstateregs_set() and
* there we check the CPU has XSAVES and a whole standard-sized buffer
* exists.
* Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
* and copy to the target thread. This is called from xstateregs_set().
*/
int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
struct xregs_state *xsave)
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
unsigned int offset, size;
int i;
u64 xfeatures;
u64 allowed_features;
struct xstate_header hdr;
offset = offsetof(struct xregs_state, header);
size = sizeof(xfeatures);
size = sizeof(hdr);
if (kbuf) {
memcpy(&xfeatures, kbuf + offset, size);
} else {
if (__copy_from_user(&xfeatures, ubuf + offset, size))
return -EFAULT;
}
memcpy(&hdr, kbuf + offset, size);
/*
* Reject if the user sets any disabled or supervisor features:
*/
allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
if (xfeatures & ~allowed_features)
if (validate_xstate_header(&hdr))
return -EINVAL;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
if (xfeatures & mask) {
if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
if (kbuf) {
memcpy(dst, kbuf + offset, size);
} else {
if (__copy_from_user(dst, ubuf + offset, size))
return -EFAULT;
}
memcpy(dst, kbuf + offset, size);
}
}
if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
}
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
@ -1076,7 +1182,63 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
/*
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= xfeatures;
xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}
/*
* Convert from a ptrace or sigreturn standard-format user-space buffer to
* kernel XSAVES format and copy to the target thread. This is called from
* xstateregs_set(), as well as potentially from the sigreturn() and
* rt_sigreturn() system calls.
*/
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
{
unsigned int offset, size;
int i;
struct xstate_header hdr;
offset = offsetof(struct xregs_state, header);
size = sizeof(hdr);
if (__copy_from_user(&hdr, ubuf + offset, size))
return -EFAULT;
if (validate_xstate_header(&hdr))
return -EINVAL;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
if (__copy_from_user(dst, ubuf + offset, size))
return -EFAULT;
}
}
if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
offset = offsetof(struct fxregs_state, mxcsr);
size = MXCSR_AND_FLAGS_SIZE;
if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
return -EFAULT;
}
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
*/
xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
/*
* Add back in the features that came in from userspace:
*/
xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}

View File

@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack)
static inline void *current_stack(void)
{
return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
/* Save the next esp at the bottom of the stack */
prev_esp = (u32 *)irqstk;
*prev_esp = current_stack_pointer();
*prev_esp = current_stack_pointer;
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
@ -139,7 +139,7 @@ void do_softirq_own_stack(void)
/* Push the previous esp onto the stack */
prev_esp = (u32 *)irqstk;
*prev_esp = current_stack_pointer();
*prev_esp = current_stack_pointer;
call_on_stack(__do_softirq, isp);
}

View File

@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent)
return 0;
out_clean_nodes:
for (j = i - 1; j > 0; j--)
for (j = i - 1; j >= 0; j--)
cleanup_setup_data_node(*(kobjp + j));
kfree(kobjp);
out_setup_data_kobj:
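The one-character change above (j > 0 becoming j >= 0) matters because the error path has to walk back over every node created so far, including index 0. A tiny standalone sketch of the same unwind pattern, with hypothetical create/destroy helpers:

/* Standalone sketch of the create-N-or-unwind-all pattern fixed above;
 * create_node()/destroy_node() are hypothetical. */
#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 4

static bool create_node(int i)  { printf("create %d\n", i); return i != 2; /* fail at 2 */ }
static void destroy_node(int i) { printf("destroy %d\n", i); }

int main(void)
{
	int i, j;

	for (i = 0; i < NR_NODES; i++) {
		if (!create_node(i))
			goto out_clean_nodes;
	}
	return 0;

out_clean_nodes:
	/* Must run down to and including j == 0; "j > 0" would leak node 0. */
	for (j = i - 1; j >= 0; j--)
		destroy_node(j);
	return 1;
}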

View File

@ -140,7 +140,8 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token;
n.cpu = smp_processor_id();
n.halted = is_idle_task(current) || preempt_count() > 1;
n.halted = is_idle_task(current) || preempt_count() > 1 ||
rcu_preempt_depth();
init_swait_queue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
raw_spin_unlock(&b->lock);

View File

@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
sp = (unsigned long) ka->sa.sa_restorer;
}
if (fpu->fpstate_active) {
if (fpu->initialized) {
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
/* save i387 and extended state */
if (fpu->fpstate_active &&
if (fpu->initialized &&
copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Ensure the signal handler starts with the new fpu state.
*/
if (fpu->fpstate_active)
if (fpu->initialized)
fpu__clear(fpu);
}
signal_setup_done(failed, ksig, stepping);

View File

@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused)
*/
if (boot_cpu_has(X86_FEATURE_PCID))
__write_cr4(__read_cr4() | X86_CR4_PCIDE);
cpu_init();
x86_cpuinit.early_percpu_clock_init();
preempt_disable();
smp_callin();
enable_start_cpu0 = 0;
#ifdef CONFIG_X86_32
/* switch away from the initial page table */
@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused)
__flush_tlb_all();
#endif
cpu_init();
x86_cpuinit.early_percpu_clock_init();
preempt_disable();
smp_callin();
enable_start_cpu0 = 0;
/* otherwise gcc will move up smp_processor_id before the cpu_init */
barrier();
/*

View File

@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
* from double_fault.
*/
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
current_stack_pointer) >= THREAD_SIZE);
preempt_enable_no_resched();
}

View File

@ -5298,7 +5298,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
@ -5306,7 +5305,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop), "+r"(__sp)
[fastop]"+S"(fop), ASM_CALL_CONSTRAINT
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);

View File

@ -200,6 +200,8 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */
struct list_head loaded_vmcss_on_cpu_link;
};
@ -600,8 +602,6 @@ struct vcpu_vmx {
int gs_ldt_reload_needed;
int fs_reload_needed;
u64 msr_host_bndcfgs;
unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */
} host_state;
struct {
int vm86_active;
@ -2202,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
struct pi_desc old, new;
unsigned int dest;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
!kvm_vcpu_apicv_active(vcpu))
/*
* In case of hot-plug or hot-unplug, we may have to undo
* vmx_vcpu_pi_put even if there is no assigned device. And we
* always keep PI.NDST up to date for simplicity: it makes the
* code easier, and CPU migration is not a fast path.
*/
if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
/*
* First handle the simple case where no cmpxchg is necessary; just
* allow posting non-urgent interrupts.
*
* If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
* PI.NDST: pi_post_block will do it for us and the wakeup_handler
* expects the VCPU to be on the blocked_vcpu_list that matches
* PI.NDST.
*/
if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
vcpu->cpu == cpu) {
pi_clear_sn(pi_desc);
return;
}
/* The full case. */
do {
old.control = new.control = pi_desc->control;
/*
* If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
* are two possible cases:
* 1. After running 'pre_block', context switch
* happened. For this case, 'sn' was set in
* vmx_vcpu_put(), so we need to clear it here.
* 2. After running 'pre_block', we were blocked,
* and woken up by some other guy. For this case,
* we don't need to do anything, 'pi_post_block'
* will do everything for us. However, we cannot
* check whether it is case #1 or case #2 here
* (maybe, not needed), so we also clear sn here,
* I think it is not a big deal.
*/
if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
if (vcpu->cpu != cpu) {
dest = cpu_physical_id(cpu);
dest = cpu_physical_id(cpu);
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
}
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
}
/* Allow posting non-urgent interrupts */
new.sn = 0;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
}
static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
@ -5178,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
*/
cr3 = __read_cr3();
vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
vmx->host_state.vmcs_host_cr3 = cr3;
vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
vmx->host_state.vmcs_host_cr4 = cr4;
vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
@ -9045,7 +9043,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
{
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
register void *__sp asm(_ASM_SP);
if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
@ -9074,7 +9071,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
[sp]"=&r"(tmp),
#endif
"+r"(__sp)
ASM_CALL_CONSTRAINT
:
[entry]"r"(entry),
[ss]"i"(__KERNEL_DS),
@ -9274,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
vmcs_writel(HOST_CR3, cr3);
vmx->host_state.vmcs_host_cr3 = cr3;
vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
}
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
vmx->host_state.vmcs_host_cr4 = cr4;
vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
@ -9592,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
/*
* Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
* or POSTED_INTR_WAKEUP_VECTOR.
*/
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx->pi_desc.sn = 1;
return &vmx->vcpu;
free_vmcs:
@ -9840,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
WARN_ON(!is_guest_mode(vcpu));
if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) {
if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
!to_vmx(vcpu)->nested.nested_run_pending) {
vmcs12->vm_exit_intr_error_code = fault->error_code;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
@ -11705,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
static void __pi_post_block(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct pi_desc old, new;
unsigned int dest;
do {
old.control = new.control = pi_desc->control;
WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
"Wakeup handler not enabled while the VCPU is blocked\n");
dest = cpu_physical_id(vcpu->cpu);
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
list_del(&vcpu->blocked_vcpu_list);
spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
vcpu->pre_pcpu = -1;
}
}
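__pi_post_block(), and the reworked vmx_vcpu_pi_load()/pi_pre_block() around it, rely on the same lock-free pattern: read the 64-bit control word, compute the new value, and retry cmpxchg64() until no other writer has raced in. Below is a minimal userspace sketch of such a retry loop using C11 atomics; the bit layout is invented for illustration and is not the VT-d posted-interrupt descriptor format:

/* Sketch of a cmpxchg retry loop on a packed 64-bit control word,
 * in the spirit of the posted-interrupt descriptor updates above. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t control;

#define NDST_SHIFT	32
#define NV_MASK		0xffULL

static void update_ndst_and_nv(uint32_t dest, uint8_t vector)
{
	uint64_t old, new;

	old = atomic_load(&control);
	do {
		new = old;
		new &= ~((uint64_t)UINT32_MAX << NDST_SHIFT);
		new |= (uint64_t)dest << NDST_SHIFT;	/* set 'NDST' */
		new = (new & ~NV_MASK) | vector;	/* set 'NV'   */
		/* On failure, 'old' is refreshed with the current value
		 * and the loop recomputes 'new' from it. */
	} while (!atomic_compare_exchange_weak(&control, &old, new));
}

int main(void)
{
	update_ndst_and_nv(3, 0xf2);
	printf("control = %#llx\n", (unsigned long long)atomic_load(&control));
	return 0;
}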
/*
* This routine does the following things for vCPU which is going
* to be blocked if VT-d PI is enabled.
@ -11720,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
unsigned long flags;
unsigned int dest;
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@ -11730,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
!kvm_vcpu_apicv_active(vcpu))
return 0;
vcpu->pre_pcpu = vcpu->cpu;
spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_add_tail(&vcpu->blocked_vcpu_list,
&per_cpu(blocked_vcpu_on_cpu,
vcpu->pre_pcpu));
spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
WARN_ON(irqs_disabled());
local_irq_disable();
if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
vcpu->pre_pcpu = vcpu->cpu;
spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
list_add_tail(&vcpu->blocked_vcpu_list,
&per_cpu(blocked_vcpu_on_cpu,
vcpu->pre_pcpu));
spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
}
do {
old.control = new.control = pi_desc->control;
/*
* We should not block the vCPU if
* an interrupt is posted for it.
*/
if (pi_test_on(pi_desc) == 1) {
spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_del(&vcpu->blocked_vcpu_list);
spin_unlock_irqrestore(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
vcpu->pre_pcpu = -1;
return 1;
}
WARN((pi_desc->sn == 1),
"Warning: SN field of posted-interrupts "
"is set before blocking\n");
@ -11779,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
} while (cmpxchg64(&pi_desc->control, old.control,
new.control) != old.control);
return 0;
/* We should not block the vCPU if an interrupt is posted for it. */
if (pi_test_on(pi_desc) == 1)
__pi_post_block(vcpu);
local_irq_enable();
return (vcpu->pre_pcpu == -1);
}
static int vmx_pre_block(struct kvm_vcpu *vcpu)
@ -11798,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
static void pi_post_block(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct pi_desc old, new;
unsigned int dest;
unsigned long flags;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
!kvm_vcpu_apicv_active(vcpu))
if (vcpu->pre_pcpu == -1)
return;
do {
old.control = new.control = pi_desc->control;
dest = cpu_physical_id(vcpu->cpu);
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
/* Allow posting non-urgent interrupts */
new.sn = 0;
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
if(vcpu->pre_pcpu != -1) {
spin_lock_irqsave(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_del(&vcpu->blocked_vcpu_list);
spin_unlock_irqrestore(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
vcpu->pre_pcpu = -1;
}
WARN_ON(irqs_disabled());
local_irq_disable();
__pi_post_block(vcpu);
local_irq_enable();
}
static void vmx_post_block(struct kvm_vcpu *vcpu)

View File

@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int r;
sigset_t sigsaved;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

View File

@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info)
struct desc_struct code_descriptor;
struct fpu *fpu = &current->thread.fpu;
fpu__activate_curr(fpu);
fpu__initialize(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {

View File

@ -2,6 +2,7 @@
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_refcount);
/*
* Handler for when we fail to restore a task's FPU state. We should never get
* here because the FPU state of a task using the FPU (task->thread.fpu.state)
* should always be valid. However, past bugs have allowed userspace to set
* reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
* These caused XRSTOR to fail when switching to the task, leaking the FPU
* registers of the task previously executing on the CPU. Mitigate this class
* of vulnerability by restoring from the initial state (essentially, zeroing
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
bool ex_handler_fprestore(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
(void *)instruction_pointer(regs));
__copy_kernel_to_fpregs(&init_fpstate, -1);
return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{

View File

@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
static void fill_sig_info_pkey(int si_code, siginfo_t *info,
struct vm_area_struct *vma)
static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
* valid VMA, so we should never reach this without a
* valid VMA.
*/
if (!vma) {
if (!pkey) {
WARN_ONCE(1, "PKU fault with no VMA passed in");
info->si_pkey = 0;
return;
@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
 * absolutely guaranteed to be 100% accurate because of
* the race explained above.
*/
info->si_pkey = vma_pkey(vma);
info->si_pkey = *pkey;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
struct task_struct *tsk, struct vm_area_struct *vma,
int fault)
struct task_struct *tsk, u32 *pkey, int fault)
{
unsigned lsb = 0;
siginfo_t info;
@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
fill_sig_info_pkey(si_code, &info, vma);
fill_sig_info_pkey(si_code, &info, pkey);
force_sig_info(si_signo, &info, tsk);
}
@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
unsigned long flags;
int sig;
/* No context means no VMA to pass down */
struct vm_area_struct *vma = NULL;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF)) {
@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address,
tsk, vma, 0);
tsk, NULL, 0);
}
/*
@ -806,7 +802,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (is_vmalloc_addr((void *)address) &&
(((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
register void *__sp asm("rsp");
unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
/*
* We're likely to be running with very little stack space
@ -821,7 +816,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
asm volatile ("movq %[stack], %%rsp\n\t"
"call handle_stack_overflow\n\t"
"1: jmp 1b"
: "+r" (__sp)
: ASM_CALL_CONSTRAINT
: "D" ("kernel stack overflow (page fault)"),
"S" (regs), "d" (address),
[stack] "rm" (stack));
@ -897,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma,
int si_code)
unsigned long address, u32 *pkey, int si_code)
{
struct task_struct *tsk = current;
@ -946,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;
force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
return;
}
@ -959,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma)
unsigned long address, u32 *pkey)
{
__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
__bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}
static void
@ -969,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma, int si_code)
{
struct mm_struct *mm = current->mm;
u32 pkey;
if (vma)
pkey = vma_pkey(vma);
/*
* Something tried to access memory that isn't in our memory map..
@ -976,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
*/
up_read(&mm->mmap_sem);
__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
__bad_area_nosemaphore(regs, error_code, address,
(vma) ? &pkey : NULL, si_code);
}
static noinline void
@ -1019,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
struct vm_area_struct *vma, unsigned int fault)
u32 *pkey, unsigned int fault)
{
struct task_struct *tsk = current;
int code = BUS_ADRERR;
@ -1046,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
code = BUS_MCEERR_AR;
}
#endif
force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma,
unsigned int fault)
unsigned long address, u32 *pkey, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
no_context(regs, error_code, address, 0, 0);
@ -1076,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, vma, fault);
do_sigbus(regs, error_code, address, pkey, fault);
else if (fault & VM_FAULT_SIGSEGV)
bad_area_nosemaphore(regs, error_code, address, vma);
bad_area_nosemaphore(regs, error_code, address, pkey);
else
BUG();
}
@ -1268,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct mm_struct *mm;
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
u32 pkey;
tsk = current;
mm = tsk->mm;
@ -1468,9 +1467,10 @@ good_area:
return;
}
pkey = vma_pkey(vma);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, vma, fault);
mm_fault_error(regs, error_code, address, &pkey, fault);
return;
}
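The reason for snapshotting pkey = vma_pkey(vma) before up_read(&mm->mmap_sem) is that the vma must not be touched once the lock is dropped; only a by-value copy may travel on into the signal-delivery path. A generic illustration of that discipline, using a pthread rwlock and made-up structures:

/* Generic "snapshot under the lock, use the copy after unlocking" pattern,
 * analogous to capturing vma_pkey(vma) before up_read() above.
 * The protection_key field and report_fault() are hypothetical. */
#include <pthread.h>
#include <stdio.h>

struct region {
	unsigned int protection_key;
};

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct region the_region = { .protection_key = 5 };

static void report_fault(unsigned int pkey)
{
	printf("fault with pkey=%u\n", pkey);
}

static void handle_fault(void)
{
	unsigned int pkey;

	pthread_rwlock_rdlock(&map_lock);
	pkey = the_region.protection_key;	/* snapshot while protected */
	pthread_rwlock_unlock(&map_lock);

	/* From here on, the_region may be changed or freed by others;
	 * only the copied value is used. */
	report_fault(pkey);
}

int main(void)
{
	handle_fault();
	return 0;
}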

View File

@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
#define DISABLE_BRANCH_PROFILING
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>

View File

@ -18,7 +18,6 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/fpu/internal.h> /* fpregs_active() */
int __execute_only_pkey(struct mm_struct *mm)
{
@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
*/
preempt_disable();
if (!need_to_set_mm_pkey &&
fpregs_active() &&
current->thread.fpu.initialized &&
!__pkru_allows_read(read_pkru(), execute_only_pkey)) {
preempt_enable();
return execute_only_pkey;

View File

@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* isn't free.
*/
#ifdef CONFIG_DEBUG_VM
if (WARN_ON_ONCE(__read_cr3() !=
(__sme_pa(real_prev->pgd) | prev_asid))) {
if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
/*
* If we were to BUG here, we'd be very likely to kill
* the system so hard that we don't see the call trace.
@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
*/
this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
next_tlb_gen);
write_cr3(__sme_pa(next->pgd) | prev_asid);
write_cr3(build_cr3(next, prev_asid));
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
TLB_FLUSH_ALL);
}
@ -192,7 +191,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* mapped in the new pgd, we'll double-fault. Forcibly
* map it.
*/
unsigned int index = pgd_index(current_stack_pointer());
unsigned int index = pgd_index(current_stack_pointer);
pgd_t *pgd = next->pgd + index;
if (unlikely(pgd_none(*pgd)))
@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (need_flush) {
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
write_cr3(__sme_pa(next->pgd) | new_asid);
write_cr3(build_cr3(next, new_asid));
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
write_cr3(build_cr3_noflush(next, new_asid));
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void)
!(cr4_read_shadow() & X86_CR4_PCIDE));
/* Force ASID 0 and force a TLB flush. */
write_cr3(cr3 & ~CR3_PCID_MASK);
write_cr3(build_cr3(mm, 0));
/* Reinitialize tlbstate. */
this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);

View File

@ -1238,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void)
* from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page
* tables - do note that they are accessible at this stage via __va.
* For good measure we also round up to the PMD - which means that if
* As Xen is aligning the memory end to a 4MB boundary, for good
* measure we also round up to PMD_SIZE * 2 - which means that if
 * anybody is using a __ka address to the initial boot-stack - and tries
* to use it - they are going to crash. The xen_start_info has been
* taken care of already in xen_setup_kernel_pagetable. */
addr = xen_start_info->pt_base;
size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
size = xen_start_info->nr_pt_frames * PAGE_SIZE;
xen_cleanhighmap(addr, addr + size);
xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
#ifdef DEBUG
/* This is superfluous and is not necessary, but you know what
* lets do it. The MODULES_VADDR -> MODULES_END should be clear of
* anything at this stage. */
xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
}
#endif

View File

@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
kobject_init(&q->kobj, &blk_queue_ktype);
#ifdef CONFIG_BLK_DEV_IO_TRACE
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);

View File

@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
failjob_rls_rqst_payload:
kfree(job->request_payload.sg_list);
failjob_rls_job:
kfree(job);
return -ENOMEM;
}

View File

@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q = dev_to_disk(dev)->queue;
struct request_queue *q = part_to_disk(p)->queue;
unsigned int inflight[2];
int cpu;

View File

@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
}
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
if (rc == -ENOENT)
return rc;
/*
* GHESv2 type HEST entries introduce support for error acknowledgment,
* so only acknowledge the error if this support is present.
*/
if (is_hest_type_generic_v2(ghes)) {
rc = ghes_ack_error(ghes->generic_v2);
if (rc)
return rc;
}
out:
ghes_clear_estatus(ghes);
if (is_hest_type_generic_v2(ghes))
return ghes_ack_error(ghes->generic_v2);
return rc;
}

View File

@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
debug_id, (u64)fda->num_fds);
continue;
}
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
proc->pid, thread->pid);
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
parent_buffer = (u8 *)((uintptr_t)parent->buffer -
binder_alloc_get_user_buffer_offset(
&target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@ -3083,6 +3082,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:

View File

@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
int rc;
ahci_reset_controller(host);
rc = ahci_reset_controller(host);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
struct ahci_host_priv *hpriv = host->private_data;

View File

@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = {
{ 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
{ 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */

View File

@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = {
};
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
q->setup = EZ(t->setup * 1000, T);
q->act8b = EZ(t->act8b * 1000, T);
q->rec8b = EZ(t->rec8b * 1000, T);
q->cyc8b = EZ(t->cyc8b * 1000, T);
q->active = EZ(t->active * 1000, T);
q->recover = EZ(t->recover * 1000, T);
q->dmack_hold = EZ(t->dmack_hold * 1000, T);
q->cycle = EZ(t->cycle * 1000, T);
q->udma = EZ(t->udma * 1000, UT);
q->setup = EZ(t->setup, T);
q->act8b = EZ(t->act8b, T);
q->rec8b = EZ(t->rec8b, T);
q->cyc8b = EZ(t->cyc8b, T);
q->active = EZ(t->active, T);
q->recover = EZ(t->recover, T);
q->dmack_hold = EZ(t->dmack_hold, T);
q->cycle = EZ(t->cycle, T);
q->udma = EZ(t->udma, UT);
}
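For reference, ENOUGH() is a ceiling division (how many whole clock periods are needed to cover a duration) and EZ() keeps an unused, zero field at zero while folding the unit scaling (the * 1000) into the macro. A tiny standalone check with arbitrary numbers:

/* Standalone illustration of ENOUGH()/EZ() as used above. Values are
 * arbitrary and not taken from any real ATA timing table. */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
#define EZ(v, unit)	((v)?ENOUGH(((v) * 1000), unit):0)

int main(void)
{
	int T = 30;	/* pretend cycle time, same units as v * 1000 */

	printf("%d\n", EZ(0, T));	/* 0: field unused, stays zero  */
	printf("%d\n", EZ(1, T));	/* 34: 1000/30 rounded up       */
	printf("%d\n", EZ(3, T));	/* 100: 3000/30, already exact  */
	return 0;
}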
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,

View File

@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
static int charlcd_open(struct inode *inode, struct file *file)
{
struct charlcd_priv *priv = to_priv(the_charlcd);
int ret;
ret = -EBUSY;
if (!atomic_dec_and_test(&charlcd_available))
return -EBUSY; /* open only once at a time */
goto fail; /* open only once at a time */
ret = -EPERM;
if (file->f_mode & FMODE_READ) /* device is write-only */
return -EPERM;
goto fail;
if (priv->must_clear) {
charlcd_clear_display(&priv->lcd);
priv->must_clear = false;
}
return nonseekable_open(inode, file);
fail:
atomic_inc(&charlcd_available);
return ret;
}
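This hunk and the keypad_open() one further down fix the same leak: once atomic_dec_and_test() has claimed the single-open slot, every later error exit must give it back. A compact userspace rendering of that pattern with C11 atomics; the error values and the write-only check are stand-ins:

/* Sketch of the "single open at a time, undo the claim on error" pattern
 * restored in charlcd_open()/keypad_open(); error conditions are faked. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int available = 1;

static int fake_open(bool read_requested)
{
	int ret;

	ret = -16; /* stand-in for -EBUSY */
	if (atomic_fetch_sub(&available, 1) != 1)
		goto fail;		/* someone else already holds it */

	ret = -1;  /* stand-in for -EPERM */
	if (read_requested)		/* pretend the device is write-only */
		goto fail;

	return 0;

fail:
	atomic_fetch_add(&available, 1);	/* give the slot back */
	return ret;
}

int main(void)
{
	printf("%d\n", fake_open(true));	/* -1: write-only, claim released */
	printf("%d\n", fake_open(false));	/* 0: open succeeds               */
	return 0;
}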
static int charlcd_release(struct inode *inode, struct file *file)

View File

@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
static int keypad_open(struct inode *inode, struct file *file)
{
if (!atomic_dec_and_test(&keypad_available))
return -EBUSY; /* open only once at a time */
int ret;
ret = -EBUSY;
if (!atomic_dec_and_test(&keypad_available))
goto fail; /* open only once at a time */
ret = -EPERM;
if (file->f_mode & FMODE_WRITE) /* device is read-only */
return -EPERM;
goto fail;
keypad_buflen = 0; /* flush the buffer on opening */
return 0;
fail:
atomic_inc(&keypad_available);
return ret;
}
static int keypad_release(struct inode *inode, struct file *file)

View File

@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
}
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static cpumask_var_t cpus_to_visit __initdata;
static void __init parsing_done_workfn(struct work_struct *work);
static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
static int
static int __init
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
static struct notifier_block init_cpu_capacity_notifier __initdata = {
.notifier_call = init_cpu_capacity_callback,
};
@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void)
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);

View File

@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
if (count > PATH_MAX)
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);

View File

@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
opp->available = availability_req;
dev_pm_opp_get(opp);
mutex_unlock(&opp_table->lock);
/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);
dev_pm_opp_put(opp);
goto put_table;
unlock:
mutex_unlock(&opp_table->lock);
put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}

View File

@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
if (!brd)
return -ENODEV;
page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
if (!page)
return -ENOSPC;
*kaddr = page_address(page);
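The brd change is an overflow fix: shifting the page offset up by PAGE_SHIFT in a 32-bit quantity can wrap on 32-bit configurations before the divide by 512, while casting to the 64-bit sector_t first and shifting by PAGE_SECTORS_SHIFT keeps the full value. A minimal demonstration of the difference, with constants mimicking 4 KiB pages and 512-byte sectors:

/* Demonstration of the 32-bit wraparound avoided above: shifting the page
 * offset in a 32-bit type loses high bits; widening to 64 bits first does
 * not. 0x100001 pages of 4 KiB lies just past the 4 GiB mark. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - 9)	/* 512-byte sectors */

int main(void)
{
	uint32_t pgoff = 0x100001;	/* page offset on a >4 GiB device */

	/* Old-style computation in a 32-bit type: the shift wraps around. */
	uint32_t bytes32 = pgoff << PAGE_SHIFT;
	printf("truncated sector: %u\n", bytes32 / 512);		/* 8       */

	/* Widen first, as (sector_t)pgoff << PAGE_SECTORS_SHIFT does. */
	uint64_t sector = (uint64_t)pgoff << PAGE_SECTORS_SHIFT;
	printf("correct sector:   %llu\n", (unsigned long long)sector);	/* 8388616 */
	return 0;
}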

View File

@ -67,10 +67,8 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
struct request *rq;
union {
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
};
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
struct kiocb iocb;
struct bio_vec *bvec;

View File

@ -1194,6 +1194,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* The block layer will pass back some non-nbd ioctls in case we have
* special handling for them, but we don't so just return an error.
*/
if (_IOC_TYPE(cmd) != 0xab)
return -EINVAL;
mutex_lock(&nbd->config_lock);
/* Don't allow ioctl operations on a nbd device that was created with

Some files were not shown because too many files have changed in this diff.