mirror of https://mirrors.bfsu.edu.cn/git/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/phy/bcm7xxx.c
  d88fd1b546 ("net: phy: bcm7xxx: Fixed indirect MMD operations")
  f68d08c437 ("net: phy: bcm7xxx: Add EPHY entry for 72165")

net/sched/sch_api.c
  b193e15ac6 ("net: prevent user from passing illegal stab size")
  69508d4333 ("net_sched: Use struct_size() and flex_array_size() helpers")

Both cases trivial - adjacent code additions.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit dd9a887b35
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations. In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
 exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
 
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
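As a hedged illustration of the behaviour this hunk documents (the controller, the IRQ base 96, and the count 32 are invented; irq_domain_simple_ops is used only to keep the sketch short), a platform whose 32 interrupt lines already own a fixed Linux IRQ range would register the legacy map roughly like this:

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical controller: hwirqs 0..31 were pre-assigned Linux IRQs 96..127. */
static struct irq_domain *example_register_legacy_domain(struct device_node *np)
{
	return irq_domain_add_legacy(np,
				     32,			/* size: number of hwirqs */
				     96,			/* first_irq: fixed Linux IRQ base */
				     0,				/* first_hwirq */
				     &irq_domain_simple_ops,	/* default translate ops */
				     NULL);			/* host_data */
}

Calling irq_domain_add_simple(np, 32, 96, &irq_domain_simple_ops, NULL) here would produce the same pre-assigned mapping, which is exactly the legacy behaviour the added sentence warns about; passing first_irq == 0 instead lets the core allocate descriptors dynamically.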
32	MAINTAINERS
@ -810,7 +810,7 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
|
||||
F: drivers/dma/altera-msgdma.c
|
||||
|
||||
ALTERA PIO DRIVER
|
||||
M: Joyce Ooi <joyce.ooi@intel.com>
|
||||
M: Mun Yew Tham <mun.yew.tham@intel.com>
|
||||
L: linux-gpio@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/gpio/gpio-altera.c
|
||||
@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/platform/x86/amd-pmc.*
|
||||
|
||||
AMD POWERPLAY
|
||||
AMD POWERPLAY AND SWSMU
|
||||
M: Evan Quan <evan.quan@amd.com>
|
||||
L: amd-gfx@lists.freedesktop.org
|
||||
S: Supported
|
||||
T: git https://gitlab.freedesktop.org/agd5f/linux.git
|
||||
F: drivers/gpu/drm/amd/pm/powerplay/
|
||||
F: drivers/gpu/drm/amd/pm/
|
||||
|
||||
AMD PTDMA DRIVER
|
||||
M: Sanjay R Mehta <sanju.mehta@amd.com>
|
||||
@ -2961,7 +2961,7 @@ F: crypto/async_tx/
|
||||
F: include/linux/async_tx.h
|
||||
|
||||
AT24 EEPROM DRIVER
|
||||
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
|
||||
M: Bartosz Golaszewski <brgl@bgdev.pl>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
|
||||
@ -3384,9 +3384,11 @@ F: Documentation/networking/filter.rst
|
||||
F: Documentation/userspace-api/ebpf/
|
||||
F: arch/*/net/*
|
||||
F: include/linux/bpf*
|
||||
F: include/linux/btf*
|
||||
F: include/linux/filter.h
|
||||
F: include/trace/events/xdp.h
|
||||
F: include/uapi/linux/bpf*
|
||||
F: include/uapi/linux/btf*
|
||||
F: include/uapi/linux/filter.h
|
||||
F: kernel/bpf/
|
||||
F: kernel/trace/bpf_trace.c
|
||||
@ -3820,7 +3822,6 @@ F: drivers/scsi/mpi3mr/
|
||||
|
||||
BROADCOM NETXTREME-E ROCE DRIVER
|
||||
M: Selvin Xavier <selvin.xavier@broadcom.com>
|
||||
M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://www.broadcom.com
|
||||
@ -7985,7 +7986,7 @@ F: include/linux/gpio/regmap.h
|
||||
|
||||
GPIO SUBSYSTEM
|
||||
M: Linus Walleij <linus.walleij@linaro.org>
|
||||
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
|
||||
M: Bartosz Golaszewski <brgl@bgdev.pl>
|
||||
L: linux-gpio@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
|
||||
@ -11366,7 +11367,7 @@ F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
|
||||
F: drivers/iio/proximity/mb1232.c
|
||||
|
||||
MAXIM MAX77650 PMIC MFD DRIVER
|
||||
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
|
||||
M: Bartosz Golaszewski <brgl@bgdev.pl>
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/*/*max77650.yaml
|
||||
@ -16650,13 +16651,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
|
||||
S: Supported
|
||||
F: drivers/char/pcmcia/scr24x_cs.c
|
||||
|
||||
SCSI CDROM DRIVER
|
||||
M: Jens Axboe <axboe@kernel.dk>
|
||||
L: linux-scsi@vger.kernel.org
|
||||
S: Maintained
|
||||
W: http://www.kernel.dk
|
||||
F: drivers/scsi/sr*
|
||||
|
||||
SCSI RDMA PROTOCOL (SRP) INITIATOR
|
||||
M: Bart Van Assche <bvanassche@acm.org>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
@ -17890,7 +17884,8 @@ M: Olivier Moysan <olivier.moysan@foss.st.com>
|
||||
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
|
||||
F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
|
||||
F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
|
||||
F: sound/soc/stm/
|
||||
|
||||
STM32 TIMER/LPTIMER DRIVERS
|
||||
@ -18689,7 +18684,7 @@ F: include/linux/clk/ti.h
|
||||
|
||||
TI DAVINCI MACHINE SUPPORT
|
||||
M: Sekhar Nori <nsekhar@ti.com>
|
||||
R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
|
||||
R: Bartosz Golaszewski <brgl@bgdev.pl>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
|
||||
@ -19288,13 +19283,12 @@ S: Maintained
|
||||
F: drivers/usb/misc/chaoskey.c
|
||||
|
||||
USB CYPRESS C67X00 DRIVER
|
||||
M: Peter Korsgaard <jacmet@sunsite.dk>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Maintained
|
||||
S: Orphan
|
||||
F: drivers/usb/c67x00/
|
||||
|
||||
USB DAVICOM DM9601 DRIVER
|
||||
M: Peter Korsgaard <jacmet@sunsite.dk>
|
||||
M: Peter Korsgaard <peter@korsgaard.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
W: http://www.linux-usb.org/usbnet
|
||||
|
2	Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 				uprobe_notify_resume(regs);
 			} else {
 				tracehook_notify_resume(regs);
-				rseq_handle_notify_resume(NULL, regs);
 			}
 		}
 		local_irq_disable();
@ -487,7 +487,6 @@
|
||||
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
|
||||
phys = <&qusb_phy_0>, <&usb0_ssphy>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
tx-fifo-resize;
|
||||
snps,is-utmi-l1-suspend;
|
||||
snps,hird-threshold = /bits/ 8 <0x0>;
|
||||
snps,dis_u2_susphy_quirk;
|
||||
@ -528,7 +527,6 @@
|
||||
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
|
||||
phys = <&qusb_phy_1>, <&usb1_ssphy>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
tx-fifo-resize;
|
||||
snps,is-utmi-l1-suspend;
|
||||
snps,hird-threshold = /bits/ 8 <0x0>;
|
||||
snps,dis_u2_susphy_quirk;
|
||||
|
@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
|
||||
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
|
||||
#define acpi_os_ioremap acpi_os_ioremap
|
||||
|
||||
void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
|
||||
#define acpi_os_memmap acpi_os_memmap
|
||||
|
||||
typedef u64 phys_cpuid_t;
|
||||
#define PHYS_CPUID_INVALID INVALID_HWID
|
||||
|
||||
|
@ -525,6 +525,11 @@ alternative_endif
|
||||
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
#define EXPORT_SYMBOL_NOHWKASAN(name)
|
||||
#else
|
||||
#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
|
||||
#endif
|
||||
/*
|
||||
* Emit a 64-bit absolute little endian symbol reference in a way that
|
||||
* ensures that it will be resolved at build time, even when building a
|
||||
|
@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
|
||||
|
||||
static inline void mte_check_tfsr_entry(void)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
mte_check_tfsr_el1();
|
||||
}
|
||||
|
||||
static inline void mte_check_tfsr_exit(void)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
/*
|
||||
* The asynchronous faults are sync'ed automatically with
|
||||
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
|
||||
|
@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
|
||||
#define __HAVE_ARCH_STRCHR
|
||||
extern char *strchr(const char *, int c);
|
||||
|
||||
#ifndef CONFIG_KASAN_HW_TAGS
|
||||
#define __HAVE_ARCH_STRCMP
|
||||
extern int strcmp(const char *, const char *);
|
||||
|
||||
#define __HAVE_ARCH_STRNCMP
|
||||
extern int strncmp(const char *, const char *, __kernel_size_t);
|
||||
#endif
|
||||
|
||||
#define __HAVE_ARCH_STRLEN
|
||||
extern __kernel_size_t strlen(const char *);
|
||||
|
@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
|
||||
return __pgprot(PROT_DEVICE_nGnRnE);
|
||||
}
|
||||
|
||||
static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
|
||||
acpi_size size, bool memory)
|
||||
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
|
||||
{
|
||||
efi_memory_desc_t *md, *region = NULL;
|
||||
pgprot_t prot;
|
||||
@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
|
||||
* It is fine for AML to remap regions that are not represented in the
|
||||
* EFI memory map at all, as it only describes normal memory, and MMIO
|
||||
* regions that require a virtual mapping to make them accessible to
|
||||
* the EFI runtime services. Determine the region default
|
||||
* attributes by checking the requested memory semantics.
|
||||
* the EFI runtime services.
|
||||
*/
|
||||
prot = memory ? __pgprot(PROT_NORMAL_NC) :
|
||||
__pgprot(PROT_DEVICE_nGnRnE);
|
||||
prot = __pgprot(PROT_DEVICE_nGnRnE);
|
||||
if (region) {
|
||||
switch (region->type) {
|
||||
case EFI_LOADER_CODE:
|
||||
@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
|
||||
return __ioremap(phys, size, prot);
|
||||
}
|
||||
|
||||
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
|
||||
{
|
||||
return __acpi_os_ioremap(phys, size, false);
|
||||
}
|
||||
|
||||
void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
|
||||
{
|
||||
return __acpi_os_ioremap(phys, size, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Claim Synchronous External Aborts as a firmware first notification.
|
||||
*
|
||||
|
@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
|
||||
/*
|
||||
* For reasons that aren't entirely clear, enabling KPTI on Cavium
|
||||
* ThunderX leads to apparent I-cache corruption of kernel text, which
|
||||
* ends as well as you might imagine. Don't even try.
|
||||
* ends as well as you might imagine. Don't even try. We cannot rely
|
||||
* on the cpus_have_*cap() helpers here to detect the CPU erratum
|
||||
* because cpucap detection order may change. However, since we know
|
||||
* affected CPUs are always in a homogeneous configuration, it is
|
||||
* safe to rely on this_cpu_has_cap() here.
|
||||
*/
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
|
||||
if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
|
||||
str = "ARM64_WORKAROUND_CAVIUM_27456";
|
||||
__kpti_forced = -1;
|
||||
}
|
||||
|
@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
void mte_check_tfsr_el1(void)
|
||||
{
|
||||
u64 tfsr_el1;
|
||||
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
|
||||
u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
|
||||
|
||||
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
|
||||
/*
|
||||
@ -199,6 +194,9 @@ void mte_thread_init_user(void)
|
||||
|
||||
void mte_thread_switch(struct task_struct *next)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
mte_update_sctlr_user(next);
|
||||
|
||||
/*
|
||||
|
@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 				do_signal(regs);
 
-			if (thread_flags & _TIF_NOTIFY_RESUME) {
+			if (thread_flags & _TIF_NOTIFY_RESUME)
 				tracehook_notify_resume(regs);
-				rseq_handle_notify_resume(NULL, regs);
-			}
 
 			if (thread_flags & _TIF_FOREIGN_FPSTATE)
 				fpsimd_restore_current_state();
@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
|
||||
# runtime. Because the hypervisor is part of the kernel binary, relocations
|
||||
# produce a kernel VA. We enumerate relocations targeting hyp at build time
|
||||
# and convert the kernel VAs at those positions to hyp VAs.
|
||||
$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
|
||||
$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
|
||||
$(call if_changed,hyprel)
|
||||
|
||||
# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
|
||||
|
@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
|
||||
|
||||
int kvm_perf_init(void)
|
||||
{
|
||||
if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
|
||||
static_branch_enable(&kvm_arm_pmu_available);
|
||||
|
||||
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
|
||||
}
|
||||
|
||||
|
@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
|
||||
kvm_pmu_create_perf_event(vcpu, select_idx);
|
||||
}
|
||||
|
||||
int kvm_pmu_probe_pmuver(void)
|
||||
void kvm_host_pmu_init(struct arm_pmu *pmu)
|
||||
{
|
||||
if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
|
||||
!kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
|
||||
static_branch_enable(&kvm_arm_pmu_available);
|
||||
}
|
||||
|
||||
static int kvm_pmu_probe_pmuver(void)
|
||||
{
|
||||
struct perf_event_attr attr = { };
|
||||
struct perf_event *event;
|
||||
|
@@ -173,4 +173,4 @@ L(done):
 	ret
 
 SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)

@@ -258,4 +258,4 @@ L(ret0):
 	ret
 
 SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)
@@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 }
@ -15,7 +15,6 @@
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/entry.h>
|
||||
@ -25,7 +24,6 @@
|
||||
.globl system_call
|
||||
.globl resume
|
||||
.globl ret_from_exception
|
||||
.globl ret_from_signal
|
||||
.globl sys_call_table
|
||||
.globl bad_interrupt
|
||||
.globl inthandler1
|
||||
@ -59,8 +57,6 @@ do_trace:
|
||||
subql #4,%sp /* dummy return address */
|
||||
SAVE_SWITCH_STACK
|
||||
jbsr syscall_trace_leave
|
||||
|
||||
ret_from_signal:
|
||||
RESTORE_SWITCH_STACK
|
||||
addql #4,%sp
|
||||
jra ret_from_exception
|
||||
|
@ -29,7 +29,6 @@ config M68K
|
||||
select NO_DMA if !MMU && !COLDFIRE
|
||||
select OLD_SIGACTION
|
||||
select OLD_SIGSUSPEND3
|
||||
select SET_FS
|
||||
select UACCESS_MEMCPY if !MMU
|
||||
select VIRT_TO_BUS
|
||||
select ZONE_DMA
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/entry.h>
|
||||
|
||||
@ -51,7 +50,6 @@ sw_usp:
|
||||
.globl system_call
|
||||
.globl resume
|
||||
.globl ret_from_exception
|
||||
.globl ret_from_signal
|
||||
.globl sys_call_table
|
||||
.globl inthandler
|
||||
|
||||
@ -98,8 +96,6 @@ ENTRY(system_call)
|
||||
subql #4,%sp /* dummy return address */
|
||||
SAVE_SWITCH_STACK
|
||||
jbsr syscall_trace_leave
|
||||
|
||||
ret_from_signal:
|
||||
RESTORE_SWITCH_STACK
|
||||
addql #4,%sp
|
||||
|
||||
|
@ -9,7 +9,6 @@
|
||||
#define __ASM_M68K_PROCESSOR_H
|
||||
|
||||
#include <linux/thread_info.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
|
||||
#define TASK_UNMAPPED_BASE 0
|
||||
#endif
|
||||
|
||||
/* Address spaces (or Function Codes in Motorola lingo) */
|
||||
#define USER_DATA 1
|
||||
#define USER_PROGRAM 2
|
||||
#define SUPER_DATA 5
|
||||
#define SUPER_PROGRAM 6
|
||||
#define CPU_SPACE 7
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
|
||||
/*
|
||||
* Set the SFC/DFC registers for special MM operations. For most normal
|
||||
* operation these remain set to USER_DATA for the uaccess routines.
|
||||
*/
|
||||
static inline void set_fc(unsigned long val)
|
||||
{
|
||||
WARN_ON_ONCE(in_interrupt());
|
||||
|
||||
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
|
||||
"movec %0,%/dfc\n\t"
|
||||
: /* no outputs */ : "r" (val) : "memory");
|
||||
}
|
||||
#else
|
||||
static inline void set_fc(unsigned long val)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
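Illustration only (not part of the patch): the comment above notes that SFC/DFC stay at USER_DATA for normal uaccess, and the pattern adopted elsewhere in this series is to bracket a privileged access with set_fc(). A minimal sketch, assuming the set_fc() helper and the USER_DATA/SUPER_DATA constants defined above, mirroring the flush_icache_range() hunk later in this diff:

void flush_icache_range_sketch(unsigned long address, unsigned long endaddr)
{
	set_fc(SUPER_DATA);				/* switch SFC/DFC to supervisor data */
	flush_icache_user_range(address, endaddr);	/* privileged cache maintenance */
	set_fc(USER_DATA);				/* restore the uaccess default */
}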
|
||||
|
||||
struct thread_struct {
|
||||
unsigned long ksp; /* kernel stack pointer */
|
||||
unsigned long usp; /* user stack pointer */
|
||||
unsigned short sr; /* saved status register */
|
||||
unsigned short fs; /* saved fs (sfc, dfc) */
|
||||
unsigned short fc; /* saved fc (sfc, dfc) */
|
||||
unsigned long crp[2]; /* cpu root pointer */
|
||||
unsigned long esp0; /* points to SR of stack frame */
|
||||
unsigned long faddr; /* info about last fault */
|
||||
@ -92,7 +117,7 @@ struct thread_struct {
|
||||
#define INIT_THREAD { \
|
||||
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
|
||||
.sr = PS_S, \
|
||||
.fs = __KERNEL_DS, \
|
||||
.fc = USER_DATA, \
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1,59 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _M68K_SEGMENT_H
|
||||
#define _M68K_SEGMENT_H
|
||||
|
||||
/* define constants */
|
||||
/* Address spaces (FC0-FC2) */
|
||||
#define USER_DATA (1)
|
||||
#ifndef __USER_DS
|
||||
#define __USER_DS (USER_DATA)
|
||||
#endif
|
||||
#define USER_PROGRAM (2)
|
||||
#define SUPER_DATA (5)
|
||||
#ifndef __KERNEL_DS
|
||||
#define __KERNEL_DS (SUPER_DATA)
|
||||
#endif
|
||||
#define SUPER_PROGRAM (6)
|
||||
#define CPU_SPACE (7)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
typedef struct {
|
||||
unsigned long seg;
|
||||
} mm_segment_t;
|
||||
|
||||
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
|
||||
/*
|
||||
* Get/set the SFC/DFC registers for MOVES instructions
|
||||
*/
|
||||
#define USER_DS MAKE_MM_SEG(__USER_DS)
|
||||
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
|
||||
|
||||
static inline mm_segment_t get_fs(void)
|
||||
{
|
||||
mm_segment_t _v;
|
||||
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
|
||||
return _v;
|
||||
}
|
||||
|
||||
static inline void set_fs(mm_segment_t val)
|
||||
{
|
||||
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
|
||||
"movec %0,%/dfc\n\t"
|
||||
: /* no outputs */ : "r" (val.seg) : "memory");
|
||||
}
|
||||
|
||||
#else
|
||||
#define USER_DS MAKE_MM_SEG(TASK_SIZE)
|
||||
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
|
||||
#define get_fs() (current_thread_info()->addr_limit)
|
||||
#define set_fs(x) (current_thread_info()->addr_limit = (x))
|
||||
#endif
|
||||
|
||||
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _M68K_SEGMENT_H */
|
@ -4,7 +4,6 @@
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/segment.h>
|
||||
|
||||
/*
|
||||
* On machines with 4k pages we default to an 8k thread size, though we
|
||||
@ -27,7 +26,6 @@
|
||||
struct thread_info {
|
||||
struct task_struct *task; /* main task structure */
|
||||
unsigned long flags;
|
||||
mm_segment_t addr_limit; /* thread address space */
|
||||
int preempt_count; /* 0 => preemptable, <0 => BUG */
|
||||
__u32 cpu; /* should always be 0 on m68k */
|
||||
unsigned long tp_value; /* thread pointer */
|
||||
@ -37,7 +35,6 @@ struct thread_info {
|
||||
#define INIT_THREAD_INFO(tsk) \
|
||||
{ \
|
||||
.task = &tsk, \
|
||||
.addr_limit = KERNEL_DS, \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
}
|
||||
|
||||
|
@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
|
||||
if (CPU_IS_COLDFIRE) {
|
||||
mmu_write(MMUOR, MMUOR_CNL);
|
||||
} else if (CPU_IS_040_OR_060) {
|
||||
mm_segment_t old_fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
set_fc(SUPER_DATA);
|
||||
__asm__ __volatile__(".chip 68040\n\t"
|
||||
"pflush (%0)\n\t"
|
||||
".chip 68k"
|
||||
: : "a" (addr));
|
||||
set_fs(old_fs);
|
||||
set_fc(USER_DATA);
|
||||
} else if (CPU_IS_020_OR_030)
|
||||
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
|
||||
}
|
||||
@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
|
||||
|
||||
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
|
||||
{
|
||||
if (vma->vm_mm == current->active_mm) {
|
||||
mm_segment_t old_fs = force_uaccess_begin();
|
||||
|
||||
if (vma->vm_mm == current->active_mm)
|
||||
__flush_tlb_one(addr);
|
||||
force_uaccess_end(old_fs);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void flush_tlb_range(struct vm_area_struct *vma,
|
||||
|
@ -267,6 +267,10 @@ struct frame {
|
||||
} un;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_M68040
|
||||
asmlinkage void berr_040cleanup(struct frame *fp);
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _M68K_TRAPS_H */
|
||||
|
@ -9,13 +9,16 @@
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/extable.h>
|
||||
|
||||
/* We let the MMU do all checking */
|
||||
static inline int access_ok(const void __user *addr,
|
||||
unsigned long size)
|
||||
{
|
||||
/*
|
||||
* XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
|
||||
* for TASK_SIZE!
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
|
||||
#define MOVES "move"
|
||||
#endif
|
||||
|
||||
extern int __put_user_bad(void);
|
||||
extern int __get_user_bad(void);
|
||||
|
||||
#define __put_user_asm(res, x, ptr, bwl, reg, err) \
|
||||
#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES"."#bwl" %2,%1\n" \
|
||||
"1: "inst"."#bwl" %2,%1\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
@ -56,32 +56,13 @@ asm volatile ("\n" \
|
||||
: "+d" (res), "=m" (*(ptr)) \
|
||||
: #reg (x), "i" (err))
|
||||
|
||||
/*
|
||||
* These are the main single-value transfer routines. They automatically
|
||||
* use the right size if we just have the right pointer type.
|
||||
*/
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
typeof(*(ptr)) __pu_val = (x); \
|
||||
int __pu_err = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (sizeof (*(ptr))) { \
|
||||
case 1: \
|
||||
__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: \
|
||||
{ \
|
||||
const void __user *__pu_ptr = (ptr); \
|
||||
#define __put_user_asm8(inst, res, x, ptr) \
|
||||
do { \
|
||||
const void *__pu_ptr = (const void __force *)(ptr); \
|
||||
\
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES".l %2,(%1)+\n" \
|
||||
"2: "MOVES".l %R2,(%1)\n" \
|
||||
"1: "inst".l %2,(%1)+\n" \
|
||||
"2: "inst".l %R2,(%1)\n" \
|
||||
"3:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
@ -95,24 +76,46 @@ asm volatile ("\n" \
|
||||
" .long 2b,10b\n" \
|
||||
" .long 3b,10b\n" \
|
||||
" .previous" \
|
||||
: "+d" (__pu_err), "+a" (__pu_ptr) \
|
||||
: "r" (__pu_val), "i" (-EFAULT) \
|
||||
: "+d" (res), "+a" (__pu_ptr) \
|
||||
: "r" (x), "i" (-EFAULT) \
|
||||
: "memory"); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* These are the main single-value transfer routines. They automatically
|
||||
* use the right size if we just have the right pointer type.
|
||||
*/
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
typeof(*(ptr)) __pu_val = (x); \
|
||||
int __pu_err = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (sizeof (*(ptr))) { \
|
||||
case 1: \
|
||||
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
|
||||
break; \
|
||||
} \
|
||||
default: \
|
||||
__pu_err = __put_user_bad(); \
|
||||
break; \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
__pu_err; \
|
||||
})
|
||||
#define put_user(x, ptr) __put_user(x, ptr)
|
||||
|
||||
|
||||
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
|
||||
#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
|
||||
type __gu_val; \
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES"."#bwl" %2,%1\n" \
|
||||
"1: "inst"."#bwl" %2,%1\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
@ -130,29 +133,17 @@ asm volatile ("\n" \
|
||||
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
|
||||
})
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
({ \
|
||||
int __gu_err = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: { \
|
||||
const void __user *__gu_ptr = (ptr); \
|
||||
#define __get_user_asm8(inst, res, x, ptr) \
|
||||
do { \
|
||||
const void *__gu_ptr = (const void __force *)(ptr); \
|
||||
union { \
|
||||
u64 l; \
|
||||
__typeof__(*(ptr)) t; \
|
||||
} __gu_val; \
|
||||
\
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES".l (%2)+,%1\n" \
|
||||
"2: "MOVES".l (%2),%R1\n" \
|
||||
"1: "inst".l (%2)+,%1\n" \
|
||||
"2: "inst".l (%2),%R1\n" \
|
||||
"3:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
@ -167,16 +158,32 @@ asm volatile ("\n" \
|
||||
" .long 1b,10b\n" \
|
||||
" .long 2b,10b\n" \
|
||||
" .previous" \
|
||||
: "+d" (__gu_err), "=&r" (__gu_val.l), \
|
||||
: "+d" (res), "=&r" (__gu_val.l), \
|
||||
"+a" (__gu_ptr) \
|
||||
: "i" (-EFAULT) \
|
||||
: "memory"); \
|
||||
(x) = __gu_val.t; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
({ \
|
||||
int __gu_err = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: \
|
||||
__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: \
|
||||
__get_user_asm8(MOVES, __gu_err, x, ptr); \
|
||||
break; \
|
||||
} \
|
||||
default: \
|
||||
__gu_err = __get_user_bad(); \
|
||||
break; \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
__gu_err; \
|
||||
})
|
||||
@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
|
||||
switch (n) {
|
||||
case 1:
|
||||
__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
|
||||
__put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
|
||||
b, d, 1);
|
||||
break;
|
||||
case 2:
|
||||
__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
|
||||
__put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
|
||||
w, r, 2);
|
||||
break;
|
||||
case 3:
|
||||
__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
|
||||
break;
|
||||
case 4:
|
||||
__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
|
||||
__put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
|
||||
l, r, 4);
|
||||
break;
|
||||
case 5:
|
||||
__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
|
||||
@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
#define INLINE_COPY_FROM_USER
|
||||
#define INLINE_COPY_TO_USER
|
||||
|
||||
#define user_addr_max() \
|
||||
(uaccess_kernel() ? ~0UL : TASK_SIZE)
|
||||
#define HAVE_GET_KERNEL_NOFAULT
|
||||
|
||||
#define __get_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
type *__gk_dst = (type *)(dst); \
|
||||
type *__gk_src = (type *)(src); \
|
||||
int __gk_err = 0; \
|
||||
\
|
||||
switch (sizeof(type)) { \
|
||||
case 1: \
|
||||
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
|
||||
u8, b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
|
||||
u16, w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
|
||||
u32, l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: \
|
||||
__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
if (unlikely(__gk_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
|
||||
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
type __pk_src = *(type *)(src); \
|
||||
type *__pk_dst = (type *)(dst); \
|
||||
int __pk_err = 0; \
|
||||
\
|
||||
switch (sizeof(type)) { \
|
||||
case 1: \
|
||||
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
|
||||
b, d, -EFAULT); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
|
||||
w, r, -EFAULT); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
|
||||
l, r, -EFAULT); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
if (unlikely(__pk_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
|
||||
|
||||
extern long strncpy_from_user(char *dst, const char __user *src, long count);
|
||||
extern __must_check long strnlen_user(const char __user *str, long n);
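For illustration (not part of the patch): with __get_kernel_nofault()/__put_kernel_nofault() wired up and HAVE_GET_KERNEL_NOFAULT defined, generic code can probe possibly-unmapped kernel addresses through the helpers in <linux/uaccess.h>. A minimal sketch; the function name is invented:

#include <linux/uaccess.h>

/* Returns 0 and fills *out, or -EFAULT if the kernel address faulted. */
static long peek_kernel_long(const unsigned long *addr, unsigned long *out)
{
	unsigned long val;

	if (get_kernel_nofault(val, addr))
		return -EFAULT;

	*out = val;
	return 0;
}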
|
||||
|
@ -31,7 +31,7 @@ int main(void)
|
||||
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
|
||||
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
|
||||
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
|
||||
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
|
||||
DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
|
||||
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
|
||||
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
|
||||
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
|
||||
|
@ -36,7 +36,6 @@
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
|
||||
|
||||
ENTRY(sys_sigreturn)
|
||||
SAVE_SWITCH_STACK
|
||||
movel %sp,%sp@- | switch_stack pointer
|
||||
pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
|
||||
movel %sp,%a1 | switch_stack pointer
|
||||
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
|
||||
lea %sp@(-84),%sp | leave a gap
|
||||
movel %a1,%sp@-
|
||||
movel %a0,%sp@-
|
||||
jbsr do_sigreturn
|
||||
addql #8,%sp
|
||||
RESTORE_SWITCH_STACK
|
||||
rts
|
||||
jra 1f | shared with rt_sigreturn()
|
||||
|
||||
ENTRY(sys_rt_sigreturn)
|
||||
SAVE_SWITCH_STACK
|
||||
movel %sp,%sp@- | switch_stack pointer
|
||||
pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
|
||||
movel %sp,%a1 | switch_stack pointer
|
||||
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
|
||||
lea %sp@(-84),%sp | leave a gap
|
||||
movel %a1,%sp@-
|
||||
movel %a0,%sp@-
|
||||
| stack contents:
|
||||
| [original pt_regs address] [original switch_stack address]
|
||||
| [gap] [switch_stack] [pt_regs] [exception frame]
|
||||
jbsr do_rt_sigreturn
|
||||
addql #8,%sp
|
||||
|
||||
1:
|
||||
| stack contents now:
|
||||
| [original pt_regs address] [original switch_stack address]
|
||||
| [unused part of the gap] [moved switch_stack] [moved pt_regs]
|
||||
| [replacement exception frame]
|
||||
| return value of do_{rt_,}sigreturn() points to moved switch_stack.
|
||||
|
||||
movel %d0,%sp | discard the leftover junk
|
||||
RESTORE_SWITCH_STACK
|
||||
| stack contents now is just [syscall return address] [pt_regs] [frame]
|
||||
| return pt_regs.d0
|
||||
movel %sp@(PT_OFF_D0+4),%d0
|
||||
rts
|
||||
|
||||
ENTRY(buserr)
|
||||
@ -182,25 +199,6 @@ do_trace_exit:
|
||||
addql #4,%sp
|
||||
jra .Lret_from_exception
|
||||
|
||||
ENTRY(ret_from_signal)
|
||||
movel %curptr@(TASK_STACK),%a1
|
||||
tstb %a1@(TINFO_FLAGS+2)
|
||||
jge 1f
|
||||
jbsr syscall_trace
|
||||
1: RESTORE_SWITCH_STACK
|
||||
addql #4,%sp
|
||||
/* on 68040 complete pending writebacks if any */
|
||||
#ifdef CONFIG_M68040
|
||||
bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
|
||||
subql #7,%d0 | bus error frame ?
|
||||
jbne 1f
|
||||
movel %sp,%sp@-
|
||||
jbsr berr_040cleanup
|
||||
addql #4,%sp
|
||||
1:
|
||||
#endif
|
||||
jra .Lret_from_exception
|
||||
|
||||
ENTRY(system_call)
|
||||
SAVE_ALL_SYS
|
||||
|
||||
@ -338,7 +336,7 @@ resume:
|
||||
|
||||
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
|
||||
movec %sfc,%d0
|
||||
movew %d0,%a0@(TASK_THREAD+THREAD_FS)
|
||||
movew %d0,%a0@(TASK_THREAD+THREAD_FC)
|
||||
|
||||
/* save usp */
|
||||
/* it is better to use a movel here instead of a movew 8*) */
|
||||
@ -424,7 +422,7 @@ resume:
|
||||
movel %a0,%usp
|
||||
|
||||
/* restore fs (sfc,%dfc) */
|
||||
movew %a1@(TASK_THREAD+THREAD_FS),%a0
|
||||
movew %a1@(TASK_THREAD+THREAD_FC),%a0
|
||||
movec %a0,%sfc
|
||||
movec %a0,%dfc
|
||||
|
||||
|
@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
|
||||
|
||||
void flush_thread(void)
|
||||
{
|
||||
current->thread.fs = __USER_DS;
|
||||
current->thread.fc = USER_DATA;
|
||||
#ifdef CONFIG_FPU
|
||||
if (!FPU_IS_EMU) {
|
||||
unsigned long zero = 0;
|
||||
@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
|
||||
* Must save the current SFC/DFC value, NOT the value when
|
||||
* the parent was last descheduled - RGH 10-08-96
|
||||
*/
|
||||
p->thread.fs = get_fs().seg;
|
||||
p->thread.fc = USER_DATA;
|
||||
|
||||
if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
|
||||
/* kernel thread */
|
||||
|
@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
|
||||
|
||||
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
|
||||
fpu_version = sc->sc_fpstate[0];
|
||||
if (CPU_IS_020_OR_030 &&
|
||||
if (CPU_IS_020_OR_030 && !regs->stkadj &&
|
||||
regs->vector >= (VEC_FPBRUC * 4) &&
|
||||
regs->vector <= (VEC_FPNAN * 4)) {
|
||||
/* Clear pending exception in 68882 idle frame */
|
||||
@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
|
||||
if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
|
||||
context_size = fpstate[1];
|
||||
fpu_version = fpstate[0];
|
||||
if (CPU_IS_020_OR_030 &&
|
||||
if (CPU_IS_020_OR_030 && !regs->stkadj &&
|
||||
regs->vector >= (VEC_FPBRUC * 4) &&
|
||||
regs->vector <= (VEC_FPNAN * 4)) {
|
||||
/* Clear pending exception in 68882 idle frame */
|
||||
@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
|
||||
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
|
||||
void __user *fp)
|
||||
{
|
||||
int fsize = frame_extra_sizes(formatvec >> 12);
|
||||
if (fsize < 0) {
|
||||
int extra = frame_extra_sizes(formatvec >> 12);
|
||||
char buf[sizeof_field(struct frame, un)];
|
||||
|
||||
if (extra < 0) {
|
||||
/*
|
||||
* user process trying to return with weird frame format
|
||||
*/
|
||||
pr_debug("user process returning with weird frame format\n");
|
||||
return 1;
|
||||
return -1;
|
||||
}
|
||||
if (!fsize) {
|
||||
if (extra && copy_from_user(buf, fp, extra))
|
||||
return -1;
|
||||
regs->format = formatvec >> 12;
|
||||
regs->vector = formatvec & 0xfff;
|
||||
} else {
|
||||
struct switch_stack *sw = (struct switch_stack *)regs - 1;
|
||||
/* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
|
||||
unsigned long buf[sizeof_field(struct frame, un) / 2];
|
||||
if (extra) {
|
||||
void *p = (struct switch_stack *)regs - 1;
|
||||
struct frame *new = (void *)regs - extra;
|
||||
int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
|
||||
|
||||
/* that'll make sure that expansion won't crap over data */
|
||||
if (copy_from_user(buf + fsize / 4, fp, fsize))
|
||||
return 1;
|
||||
|
||||
/* point of no return */
|
||||
regs->format = formatvec >> 12;
|
||||
regs->vector = formatvec & 0xfff;
|
||||
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
|
||||
__asm__ __volatile__ (
|
||||
#ifdef CONFIG_COLDFIRE
|
||||
" movel %0,%/sp\n\t"
|
||||
" bra ret_from_signal\n"
|
||||
#else
|
||||
" movel %0,%/a0\n\t"
|
||||
" subl %1,%/a0\n\t" /* make room on stack */
|
||||
" movel %/a0,%/sp\n\t" /* set stack pointer */
|
||||
/* move switch_stack and pt_regs */
|
||||
"1: movel %0@+,%/a0@+\n\t"
|
||||
" dbra %2,1b\n\t"
|
||||
" lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
|
||||
" lsrl #2,%1\n\t"
|
||||
" subql #1,%1\n\t"
|
||||
/* copy to the gap we'd made */
|
||||
"2: movel %4@+,%/a0@+\n\t"
|
||||
" dbra %1,2b\n\t"
|
||||
" bral ret_from_signal\n"
|
||||
memmove(p - extra, p, size);
|
||||
memcpy(p - extra + size, buf, extra);
|
||||
current->thread.esp0 = (unsigned long)&new->ptregs;
|
||||
#ifdef CONFIG_M68040
|
||||
/* on 68040 complete pending writebacks if any */
|
||||
if (new->ptregs.format == 7) // bus error frame
|
||||
berr_040cleanup(new);
|
||||
#endif
|
||||
: /* no outputs, it doesn't ever return */
|
||||
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
|
||||
"n" (frame_offset), "a" (buf + fsize/4)
|
||||
: "a0");
|
||||
#undef frame_offset
|
||||
}
|
||||
return 0;
|
||||
return extra;
|
||||
}
|
||||
|
||||
static inline int
|
||||
@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
|
||||
{
|
||||
int formatvec;
|
||||
struct sigcontext context;
|
||||
int err = 0;
|
||||
|
||||
siginfo_build_tests();
|
||||
|
||||
@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
|
||||
|
||||
/* get previous context */
|
||||
if (copy_from_user(&context, usc, sizeof(context)))
|
||||
goto badframe;
|
||||
return -1;
|
||||
|
||||
/* restore passed registers */
|
||||
regs->d0 = context.sc_d0;
|
||||
@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
|
||||
wrusp(context.sc_usp);
|
||||
formatvec = context.sc_formatvec;
|
||||
|
||||
err = restore_fpu_state(&context);
|
||||
if (restore_fpu_state(&context))
|
||||
return -1;
|
||||
|
||||
if (err || mangle_kernel_stack(regs, formatvec, fp))
|
||||
goto badframe;
|
||||
|
||||
return 0;
|
||||
|
||||
badframe:
|
||||
return 1;
|
||||
return mangle_kernel_stack(regs, formatvec, fp);
|
||||
}
|
||||
|
||||
static inline int
|
||||
@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
|
||||
|
||||
err = __get_user(temp, &uc->uc_mcontext.version);
|
||||
if (temp != MCONTEXT_VERSION)
|
||||
goto badframe;
|
||||
return -1;
|
||||
/* restore passed registers */
|
||||
err |= __get_user(regs->d0, &gregs[0]);
|
||||
err |= __get_user(regs->d1, &gregs[1]);
|
||||
@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
|
||||
err |= restore_altstack(&uc->uc_stack);
|
||||
|
||||
if (err)
|
||||
goto badframe;
|
||||
return -1;
|
||||
|
||||
if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
|
||||
goto badframe;
|
||||
|
||||
return 0;
|
||||
|
||||
badframe:
|
||||
return 1;
|
||||
return mangle_kernel_stack(regs, temp, &uc->uc_extra);
|
||||
}
|
||||
|
||||
asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
{
|
||||
unsigned long usp = rdusp();
|
||||
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
|
||||
sigset_t set;
|
||||
int size;
|
||||
|
||||
if (!access_ok(frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
|
||||
set_current_blocked(&set);
|
||||
|
||||
if (restore_sigcontext(regs, &frame->sc, frame + 1))
|
||||
size = restore_sigcontext(regs, &frame->sc, frame + 1);
|
||||
if (size < 0)
|
||||
goto badframe;
|
||||
return regs->d0;
|
||||
return (void *)sw - size;
|
||||
|
||||
badframe:
|
||||
force_sig(SIGSEGV);
|
||||
return 0;
|
||||
return sw;
|
||||
}
|
||||
|
||||
asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
{
|
||||
unsigned long usp = rdusp();
|
||||
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
|
||||
sigset_t set;
|
||||
int size;
|
||||
|
||||
if (!access_ok(frame, sizeof(*frame)))
|
||||
goto badframe;
|
||||
@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
|
||||
|
||||
set_current_blocked(&set);
|
||||
|
||||
if (rt_restore_ucontext(regs, sw, &frame->uc))
|
||||
size = rt_restore_ucontext(regs, sw, &frame->uc);
|
||||
if (size < 0)
|
||||
goto badframe;
|
||||
return regs->d0;
|
||||
return (void *)sw - size;
|
||||
|
||||
badframe:
|
||||
force_sig(SIGSEGV);
|
||||
return 0;
|
||||
return sw;
|
||||
}
|
||||
|
||||
static inline struct pt_regs *rte_regs(struct pt_regs *regs)
|
||||
{
|
||||
return (void *)regs + regs->stkadj;
|
||||
}
|
||||
|
||||
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
|
||||
unsigned long mask)
|
||||
{
|
||||
struct pt_regs *tregs = rte_regs(regs);
|
||||
sc->sc_mask = mask;
|
||||
sc->sc_usp = rdusp();
|
||||
sc->sc_d0 = regs->d0;
|
||||
sc->sc_d1 = regs->d1;
|
||||
sc->sc_a0 = regs->a0;
|
||||
sc->sc_a1 = regs->a1;
|
||||
sc->sc_sr = regs->sr;
|
||||
sc->sc_pc = regs->pc;
|
||||
sc->sc_formatvec = regs->format << 12 | regs->vector;
|
||||
sc->sc_sr = tregs->sr;
|
||||
sc->sc_pc = tregs->pc;
|
||||
sc->sc_formatvec = tregs->format << 12 | tregs->vector;
|
||||
save_a5_state(sc, regs);
|
||||
save_fpu_state(sc, regs);
|
||||
}
|
||||
@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
|
||||
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
|
||||
{
|
||||
struct switch_stack *sw = (struct switch_stack *)regs - 1;
|
||||
struct pt_regs *tregs = rte_regs(regs);
|
||||
greg_t __user *gregs = uc->uc_mcontext.gregs;
|
||||
int err = 0;
|
||||
|
||||
@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
|
||||
err |= __put_user(sw->a5, &gregs[13]);
|
||||
err |= __put_user(sw->a6, &gregs[14]);
|
||||
err |= __put_user(rdusp(), &gregs[15]);
|
||||
err |= __put_user(regs->pc, &gregs[16]);
|
||||
err |= __put_user(regs->sr, &gregs[17]);
|
||||
err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
|
||||
err |= __put_user(tregs->pc, &gregs[16]);
|
||||
err |= __put_user(tregs->sr, &gregs[17]);
|
||||
err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
|
||||
err |= rt_save_fpu_state(uc, regs);
|
||||
return err;
|
||||
}
|
||||
@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct sigframe __user *frame;
|
||||
int fsize = frame_extra_sizes(regs->format);
|
||||
struct pt_regs *tregs = rte_regs(regs);
|
||||
int fsize = frame_extra_sizes(tregs->format);
|
||||
struct sigcontext context;
|
||||
int err = 0, sig = ksig->sig;
|
||||
|
||||
if (fsize < 0) {
|
||||
pr_debug("setup_frame: Unknown frame format %#x\n",
|
||||
regs->format);
|
||||
tregs->format);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
|
||||
|
||||
err |= __put_user(sig, &frame->sig);
|
||||
|
||||
err |= __put_user(regs->vector, &frame->code);
|
||||
err |= __put_user(tregs->vector, &frame->code);
|
||||
err |= __put_user(&frame->sc, &frame->psc);
|
||||
|
||||
if (_NSIG_WORDS > 1)
|
||||
@ -933,34 +912,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
|
||||
|
||||
push_cache ((unsigned long) &frame->retcode);
|
||||
|
||||
/*
|
||||
* Set up registers for signal handler. All the state we are about
|
||||
* to destroy is successfully copied to sigframe.
|
||||
*/
|
||||
wrusp ((unsigned long) frame);
|
||||
regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
|
||||
adjustformat(regs);
|
||||
|
||||
/*
|
||||
* This is subtle; if we build more than one sigframe, all but the
|
||||
* first one will see frame format 0 and have fsize == 0, so we won't
|
||||
* screw stkadj.
|
||||
*/
|
||||
if (fsize)
|
||||
if (fsize) {
|
||||
regs->stkadj = fsize;
|
||||
|
||||
/* Prepare to skip over the extra stuff in the exception frame. */
|
||||
if (regs->stkadj) {
|
||||
struct pt_regs *tregs =
|
||||
(struct pt_regs *)((ulong)regs + regs->stkadj);
|
||||
tregs = rte_regs(regs);
|
||||
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
|
||||
/* This must be copied with decreasing addresses to
|
||||
handle overlaps. */
|
||||
tregs->vector = 0;
|
||||
tregs->format = 0;
|
||||
tregs->pc = regs->pc;
|
||||
tregs->sr = regs->sr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up registers for signal handler. All the state we are about
|
||||
* to destroy is successfully copied to sigframe.
|
||||
*/
|
||||
wrusp ((unsigned long) frame);
|
||||
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
|
||||
adjustformat(regs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct rt_sigframe __user *frame;
|
||||
int fsize = frame_extra_sizes(regs->format);
|
||||
struct pt_regs *tregs = rte_regs(regs);
|
||||
int fsize = frame_extra_sizes(tregs->format);
|
||||
int err = 0, sig = ksig->sig;
|
||||
|
||||
if (fsize < 0) {
|
||||
@ -1018,34 +992,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
|
||||
|
||||
push_cache ((unsigned long) &frame->retcode);
|
||||
|
||||
/*
|
||||
* Set up registers for signal handler. All the state we are about
|
||||
* to destroy is successfully copied to sigframe.
|
||||
*/
|
||||
wrusp ((unsigned long) frame);
|
||||
regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
|
||||
adjustformat(regs);
|
||||
|
||||
/*
|
||||
* This is subtle; if we build more than one sigframe, all but the
|
||||
* first one will see frame format 0 and have fsize == 0, so we won't
|
||||
* screw stkadj.
|
||||
*/
|
||||
if (fsize)
|
||||
if (fsize) {
|
||||
regs->stkadj = fsize;
|
||||
|
||||
/* Prepare to skip over the extra stuff in the exception frame. */
|
||||
if (regs->stkadj) {
|
||||
struct pt_regs *tregs =
|
||||
(struct pt_regs *)((ulong)regs + regs->stkadj);
|
||||
tregs = rte_regs(regs);
|
||||
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
|
||||
/* This must be copied with decreasing addresses to
|
||||
handle overlaps. */
|
||||
tregs->vector = 0;
|
||||
tregs->format = 0;
|
||||
tregs->pc = regs->pc;
|
||||
tregs->sr = regs->sr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up registers for signal handler. All the state we are about
|
||||
* to destroy is successfully copied to sigframe.
|
||||
*/
|
||||
wrusp ((unsigned long) frame);
|
||||
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
|
||||
adjustformat(regs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
|
||||
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
|
||||
{
|
||||
unsigned long mmusr;
|
||||
mm_segment_t old_fs = get_fs();
|
||||
|
||||
set_fs(MAKE_MM_SEG(wbs));
|
||||
set_fc(wbs);
|
||||
|
||||
if (iswrite)
|
||||
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
|
||||
@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
|
||||
|
||||
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
|
||||
|
||||
set_fs(old_fs);
|
||||
set_fc(USER_DATA);
|
||||
|
||||
return mmusr;
|
||||
}
|
||||
@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
|
||||
unsigned long wbd)
|
||||
{
|
||||
int res = 0;
|
||||
mm_segment_t old_fs = get_fs();
|
||||
|
||||
/* set_fs can not be moved, otherwise put_user() may oops */
|
||||
set_fs(MAKE_MM_SEG(wbs));
|
||||
set_fc(wbs);
|
||||
|
||||
switch (wbs & WBSIZ_040) {
|
||||
case BA_SIZE_BYTE:
|
||||
@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
|
||||
break;
|
||||
}
|
||||
|
||||
/* set_fs can not be moved, otherwise put_user() may oops */
|
||||
set_fs(old_fs);
|
||||
|
||||
set_fc(USER_DATA);
|
||||
|
||||
pr_debug("do_040writeback1, res=%d\n", res);
|
||||
|
||||
|
@ -18,7 +18,6 @@
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/macintosh.h>
|
||||
#include <asm/mac_via.h>
|
||||
|
@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
|
||||
if (mmusr & MMU_R_040)
|
||||
return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
|
||||
} else {
|
||||
unsigned short mmusr;
|
||||
unsigned long *descaddr;
|
||||
|
||||
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
|
||||
"pmove %%psr,%1"
|
||||
: "=a&" (descaddr), "=m" (mmusr)
|
||||
: "a" (vaddr), "d" (get_fs().seg));
|
||||
if (mmusr & (MMU_I|MMU_B|MMU_L))
|
||||
return 0;
|
||||
descaddr = phys_to_virt((unsigned long)descaddr);
|
||||
switch (mmusr & MMU_NUM) {
|
||||
case 1:
|
||||
return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
|
||||
case 2:
|
||||
return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
|
||||
case 3:
|
||||
return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
|
||||
}
|
||||
WARN_ON_ONCE(!CPU_IS_040_OR_060);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
|
||||
|
||||
void flush_icache_range(unsigned long address, unsigned long endaddr)
|
||||
{
|
||||
mm_segment_t old_fs = get_fs();
|
||||
|
||||
set_fs(KERNEL_DS);
|
||||
set_fc(SUPER_DATA);
|
||||
flush_icache_user_range(address, endaddr);
|
||||
set_fs(old_fs);
|
||||
set_fc(USER_DATA);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
|
||||
|
@ -72,12 +72,6 @@ void __init paging_init(void)
|
||||
if (!empty_zero_page)
|
||||
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
|
||||
__func__, PAGE_SIZE, PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* Set up SFC/DFC registers (user data space).
|
||||
*/
|
||||
set_fs (USER_DS);
|
||||
|
||||
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
|
||||
free_area_init(max_zone_pfn);
|
||||
}
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/machdep.h>
|
||||
|
@ -467,7 +467,7 @@ void __init paging_init(void)
|
||||
/*
|
||||
* Set up SFC/DFC registers
|
||||
*/
|
||||
set_fs(KERNEL_DS);
|
||||
set_fc(USER_DATA);
|
||||
|
||||
#ifdef DEBUG
|
||||
printk ("before free_area_init\n");
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include <asm/intersil.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/sun3ints.h>
|
||||
|
||||
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
|
||||
@ -89,7 +88,7 @@ void __init sun3_init(void)
|
||||
sun3_reserved_pmeg[249] = 1;
|
||||
sun3_reserved_pmeg[252] = 1;
|
||||
sun3_reserved_pmeg[253] = 1;
|
||||
set_fs(KERNEL_DS);
|
||||
set_fc(USER_DATA);
|
||||
}
|
||||
|
||||
/* Without this, Bad Things happen when something calls arch_reset. */
|
||||
|
@ -23,7 +23,6 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/sun3mmu.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/dvma.h>
|
||||
@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
|
||||
for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
|
||||
sun3_put_segmap(seg, SUN3_INVALID_PMEG);
|
||||
|
||||
set_fs(MAKE_MM_SEG(3));
|
||||
set_fc(3);
|
||||
for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
|
||||
i = sun3_get_segmap(seg);
|
||||
for(j = 1; j < CONTEXTS_NUM; j++)
|
||||
(*(romvec->pv_setctxt))(j, (void *)seg, i);
|
||||
}
|
||||
set_fs(KERNEL_DS);
|
||||
|
||||
set_fc(USER_DATA);
|
||||
}
|
||||
|
||||
/* erase the mappings for a dead context. Uses the pg_dir for hints
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/intersil.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/sun3ints.h>
|
||||
|
@ -14,7 +14,6 @@
|
||||
#include <asm/traps.h>
|
||||
#include <asm/sun3xprom.h>
|
||||
#include <asm/idprom.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/sun3ints.h>
|
||||
#include <asm/openprom.h>
|
||||
#include <asm/machines.h>
|
||||
|
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs);
 
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(NULL, regs);
-	}
 
 	user_enter();
 }
@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
|
||||
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
|
||||
func##_positive)
|
||||
|
||||
static bool is_bad_offset(int b_off)
|
||||
{
|
||||
return b_off > 0x1ffff || b_off < -0x20000;
|
||||
}
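Background note (an assumption about the instruction encoding, not text from the patch): a MIPS I-type branch stores a signed 16-bit word offset, so the reachable displacement is -32768*4 = -0x20000 bytes backward and 32767*4 = 0x1fffc bytes forward; is_bad_offset() rejects anything outside that window so build_body() can fail with -E2BIG instead of emitting a truncated branch. A standalone user-space sketch of the same bound:

#include <stdint.h>
#include <stdio.h>

#define MIPS_BRANCH_MIN	(-32768 * 4)	/* -0x20000 */
#define MIPS_BRANCH_MAX	(32767 * 4)	/*  0x1fffc */

static int branch_offset_ok(int32_t b_off)
{
	return b_off >= MIPS_BRANCH_MIN && b_off <= MIPS_BRANCH_MAX;
}

int main(void)
{
	/* prints "1 0": the first offset is encodable, the second is not */
	printf("%d %d\n", branch_offset_ok(0x1fffc), branch_offset_ok(0x20000));
	return 0;
}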
|
||||
|
||||
static int build_body(struct jit_ctx *ctx)
|
||||
{
|
||||
const struct bpf_prog *prog = ctx->skf;
|
||||
@ -728,7 +733,10 @@ load_common:
|
||||
/* Load return register on DS for failures */
|
||||
emit_reg_move(r_ret, r_zero, ctx);
|
||||
/* Return with error */
|
||||
emit_b(b_imm(prog->len, ctx), ctx);
|
||||
b_off = b_imm(prog->len, ctx);
|
||||
if (is_bad_offset(b_off))
|
||||
return -E2BIG;
|
||||
emit_b(b_off, ctx);
|
||||
emit_nop(ctx);
|
||||
break;
|
||||
case BPF_LD | BPF_W | BPF_IND:
|
||||
@@ -775,8 +783,10 @@ load_ind:
emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */
emit_bcond(MIPS_COND_NE, r_ret, 0,
b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
/* We are good */
/* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
/* A /= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero,
b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx);
break;
@@ -864,8 +876,10 @@ load_ind:
/* A %= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero,
b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx);
break;
@@ -926,7 +940,10 @@ load_ind:
break;
case BPF_JMP | BPF_JA:
/* pc += K */
emit_b(b_imm(i + k + 1, ctx), ctx);
b_off = b_imm(i + k + 1, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
break;
case BPF_RET | BPF_A:
ctx->flags |= SEEN_A;
if (i != prog->len - 1)
if (i != prog->len - 1) {
/*
* If this is not the last instruction
* then jump to the epilogue
*/
emit_b(b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
}
emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break;
case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
* If this is not the last instruction
* then jump to the epilogue
*/
emit_b(b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
emit_nop(ctx);
}
break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
/* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */
emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
b_imm(prog->len, ctx), ctx);
b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)

/* Generate the actual JIT code */
build_prologue(&ctx);
build_body(&ctx);
if (build_body(&ctx)) {
module_memfree(ctx.target);
goto out;
}
build_epilogue(&ctx);

/* Update the icache */
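A note on the recurring pattern in the JIT hunks above: the branch displacement is now computed first, rejected with -E2BIG when it cannot be encoded, and only then emitted. A classic MIPS branch carries a signed 16-bit word offset, i.e. a byte offset in [-0x20000, 0x1ffff]. The standalone C sketch below only illustrates that range check; emit_branch_checked() is an invented helper and the emit step is reduced to a comment, since the real JIT context is not reproduced here.

#include <errno.h>
#include <stdio.h>

/* Same bounds as the is_bad_offset() helper added in the diff:
 * a MIPS branch immediate is 16 bits of words, so +/-128 KiB of bytes. */
static int is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}

/* Invented helper mirroring the emit path in the hunks above. */
static int emit_branch_checked(int b_off)
{
	if (is_bad_offset(b_off))
		return -E2BIG;	/* too far to encode, refuse to JIT */
	/* emit_b(b_off, ctx) would go here in the real JIT */
	return 0;
}

int main(void)
{
	/* 0x1ffff still fits, 0x20000 does not */
	printf("%d %d\n", emit_branch_checked(0x1ffff), emit_branch_checked(0x20000));
	return 0;
}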
@@ -3,9 +3,10 @@
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
depends on TTY
select SERIAL_CORE_CONSOLE
help
Enable early printk on console
Enable early printk on console.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,

void __init setup_arch(char **cmdline_p)
{
int dram_start;

console_verbose();

memory_start = memblock_start_of_DRAM();
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
do_signal(current);
}

if (thread_info_flags & _TIF_NOTIFY_RESUME) {
if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}

static unsigned long get_tm_stackpointer(struct task_struct *tsk)
@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}

clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);

@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;

static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)pud_val(pud);
return (pmd_t *)(unsigned long)pud_val(pud);
}

/* only used by the stubbed out hugetlb gup code, should never be called */
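The sparc one-liner above adds an intermediate (unsigned long) cast so a 64-bit page-table value can become a pointer without a cast-to-pointer-from-integer-of-different-size warning on 32-bit builds. Below is a small self-contained sketch of the same idiom; the type and helper are made up and only stand in for pud_t/pud_val().

#include <stdio.h>

/* 64-bit table entry that actually holds a narrow physical pointer,
 * as on 32-bit builds where the entry type is wider than a pointer. */
typedef struct { unsigned long long pud; } fake_pud_t;

static inline void *fake_pud_pgtable(fake_pud_t pud)
{
	/*
	 * Casting the 64-bit value straight to a pointer warns on
	 * 32-bit targets; narrowing through unsigned long first is
	 * exactly what (pmd_t *)(unsigned long)pud_val(pud) does.
	 */
	return (void *)(unsigned long)pud.pud;
}

int main(void)
{
	fake_pud_t pud = { .pud = 0x1000 };

	printf("%p\n", fake_pud_pgtable(pud));
	return 0;
}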
@ -2610,7 +2610,6 @@ config PCI_OLPC
|
||||
config PCI_XEN
|
||||
def_bool y
|
||||
depends on PCI && XEN
|
||||
select SWIOTLB_XEN
|
||||
|
||||
config MMCONF_FAM10H
|
||||
def_bool y
|
||||
|
@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
|
||||
* %rdx: src (1..8 blocks)
|
||||
* %rcx: num blocks (1..8)
|
||||
*/
|
||||
FRAME_BEGIN
|
||||
|
||||
cmpq $5, %rcx;
|
||||
jb sm4_aesni_avx_crypt4;
|
||||
|
||||
FRAME_BEGIN
|
||||
|
||||
vmovdqu (0 * 16)(%rdx), RA0;
|
||||
vmovdqu (1 * 16)(%rdx), RA1;
|
||||
vmovdqu (2 * 16)(%rdx), RA2;
|
||||
|
@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
|
||||
struct kvm_page_track_notifier_node *node);
|
||||
};
|
||||
|
||||
void kvm_page_track_init(struct kvm *kvm);
|
||||
int kvm_page_track_init(struct kvm *kvm);
|
||||
void kvm_page_track_cleanup(struct kvm *kvm);
|
||||
|
||||
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
|
||||
|
@ -2,8 +2,6 @@
|
||||
#ifndef _ASM_X86_PKEYS_H
|
||||
#define _ASM_X86_PKEYS_H
|
||||
|
||||
#define ARCH_DEFAULT_PKEY 0
|
||||
|
||||
/*
|
||||
* If more than 16 keys are ever supported, a thorough audit
|
||||
* will be necessary to ensure that the types that store key
|
||||
|
@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
|
||||
{
|
||||
const struct { char _[64]; } *__src = src;
|
||||
struct { char _[64]; } __iomem *__dst = dst;
|
||||
int zf;
|
||||
bool zf;
|
||||
|
||||
/*
|
||||
* ENQCMDS %(rdx), rax
|
||||
|
@ -3,14 +3,10 @@
|
||||
#define _ASM_X86_SWIOTLB_XEN_H
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_XEN
|
||||
extern int xen_swiotlb;
|
||||
extern int __init pci_xen_swiotlb_detect(void);
|
||||
extern void __init pci_xen_swiotlb_init(void);
|
||||
extern int pci_xen_swiotlb_init_late(void);
|
||||
#else
|
||||
#define xen_swiotlb (0)
|
||||
static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
|
||||
static inline void __init pci_xen_swiotlb_init(void) { }
|
||||
#define pci_xen_swiotlb_detect NULL
|
||||
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
|
||||
#endif
|
||||
|
||||
|
@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
x86_init.oem.arch_setup();
|
||||
|
||||
/*
|
||||
* Do some memory reservations *before* memory is added to memblock, so
|
||||
* memblock allocations won't overwrite it.
|
||||
*
|
||||
* After this point, everything still needed from the boot loader or
|
||||
* firmware or kernel text should be early reserved or marked not RAM in
|
||||
* e820. All other memory is free game.
|
||||
*
|
||||
* This call needs to happen before e820__memory_setup() which calls the
|
||||
* xen_memory_setup() on Xen dom0 which relies on the fact that those
|
||||
* early reservations have happened already.
|
||||
*/
|
||||
early_reserve_memory();
|
||||
|
||||
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
|
||||
e820__memory_setup();
|
||||
parse_setup_data();
|
||||
@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
parse_early_param();
|
||||
|
||||
/*
|
||||
* Do some memory reservations *before* memory is added to
|
||||
* memblock, so memblock allocations won't overwrite it.
|
||||
* Do it after early param, so we could get (unlikely) panic from
|
||||
* serial.
|
||||
*
|
||||
* After this point everything still needed from the boot loader or
|
||||
* firmware or kernel text should be early reserved or marked not
|
||||
* RAM in e820. All other memory is free game.
|
||||
*/
|
||||
early_reserve_memory();
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
/*
|
||||
* Memory used by the kernel cannot be hot-removed because Linux
|
||||
|
@ -4206,7 +4206,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
|
||||
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
|
||||
|
||||
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
|
||||
return emulate_ud(ctxt);
|
||||
return emulate_gp(ctxt, 0);
|
||||
|
||||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
|
@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
|
||||
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
|
||||
stimer_init(&hv_vcpu->stimer[i], i);
|
||||
|
||||
hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
|
||||
hv_vcpu->vp_index = vcpu->vcpu_idx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
|
||||
switch (msr) {
|
||||
case HV_X64_MSR_VP_INDEX: {
|
||||
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
|
||||
int vcpu_idx = kvm_vcpu_get_idx(vcpu);
|
||||
u32 new_vp_index = (u32)data;
|
||||
|
||||
if (!host || new_vp_index >= KVM_MAX_VCPUS)
|
||||
@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
|
||||
* VP index is changing, adjust num_mismatched_vp_indexes if
|
||||
* it now matches or no longer matches vcpu_idx.
|
||||
*/
|
||||
if (hv_vcpu->vp_index == vcpu_idx)
|
||||
if (hv_vcpu->vp_index == vcpu->vcpu_idx)
|
||||
atomic_inc(&hv->num_mismatched_vp_indexes);
|
||||
else if (new_vp_index == vcpu_idx)
|
||||
else if (new_vp_index == vcpu->vcpu_idx)
|
||||
atomic_dec(&hv->num_mismatched_vp_indexes);
|
||||
|
||||
hv_vcpu->vp_index = new_vp_index;
|
||||
|
@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
|
||||
|
||||
return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
|
||||
return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
|
||||
}
|
||||
|
||||
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
|
||||
|
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
unsigned index;
bool mask_before, mask_after;
union kvm_ioapic_redirect_entry *e;
unsigned long vcpu_bitmap;
int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false;
bitmap_zero(&vcpu_bitmap, 16);
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
&vcpu_bitmap);
vcpu_bitmap);
if (old_dest_mode != e->fields.dest_mode ||
old_dest_id != e->fields.dest_id) {
/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
kvm_lapic_irq_dest_mode(
!!e->fields.dest_mode);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
&vcpu_bitmap);
vcpu_bitmap);
}
kvm_make_scan_ioapic_request_mask(ioapic->kvm,
&vcpu_bitmap);
vcpu_bitmap);
} else {
kvm_make_scan_ioapic_request(ioapic->kvm);
}
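The ioapic hunks above replace a single unsigned long, which can track at most BITS_PER_LONG vCPUs, with DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS), and pass the array itself (not its address) to the bitmap helpers. The userspace sketch below shows why the array form matters; the macros and the MAX_VCPUS value are simplified stand-ins for the kernel's, not the real definitions.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]
#define MAX_VCPUS	288	/* made-up stand-in for KVM_MAX_VCPUS */

static void bitmap_zero(unsigned long *map, unsigned int bits)
{
	memset(map, 0, BITS_TO_LONGS(bits) * sizeof(unsigned long));
}

static void set_bit_in(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	DECLARE_BITMAP(vcpu_bitmap, MAX_VCPUS);	/* an array: pass it directly */

	bitmap_zero(vcpu_bitmap, MAX_VCPUS);
	set_bit_in(200, vcpu_bitmap);	/* would overflow a lone unsigned long */
	printf("word %zu = %#lx\n",
	       (size_t)(200 / BITS_PER_LONG),
	       vcpu_bitmap[200 / BITS_PER_LONG]);
	return 0;
}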
@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
|
||||
} while (!sp->unsync_children);
|
||||
}
|
||||
|
||||
static void mmu_sync_children(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mmu_page *parent)
|
||||
static int mmu_sync_children(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mmu_page *parent, bool can_yield)
|
||||
{
|
||||
int i;
|
||||
struct kvm_mmu_page *sp;
|
||||
@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
|
||||
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
|
||||
if (!can_yield) {
|
||||
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
|
||||
flush = false;
|
||||
}
|
||||
}
|
||||
|
||||
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
|
||||
@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
|
||||
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
|
||||
}
|
||||
|
||||
if (sp->unsync_children)
|
||||
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
|
||||
|
||||
__clear_sp_write_flooding_count(sp);
|
||||
|
||||
trace_get_page:
|
||||
@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
|
||||
write_lock(&vcpu->kvm->mmu_lock);
|
||||
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
|
||||
|
||||
mmu_sync_children(vcpu, sp);
|
||||
mmu_sync_children(vcpu, sp, true);
|
||||
|
||||
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
|
||||
write_unlock(&vcpu->kvm->mmu_lock);
|
||||
@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
|
||||
if (IS_VALID_PAE_ROOT(root)) {
|
||||
root &= PT64_BASE_ADDR_MASK;
|
||||
sp = to_shadow_page(root);
|
||||
mmu_sync_children(vcpu, sp);
|
||||
mmu_sync_children(vcpu, sp, true);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
|
||||
cleanup_srcu_struct(&head->track_srcu);
|
||||
}
|
||||
|
||||
void kvm_page_track_init(struct kvm *kvm)
|
||||
int kvm_page_track_init(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_page_track_notifier_head *head;
|
||||
|
||||
head = &kvm->arch.track_notifier_head;
|
||||
init_srcu_struct(&head->track_srcu);
|
||||
INIT_HLIST_HEAD(&head->track_notifier_list);
|
||||
return init_srcu_struct(&head->track_srcu);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
|
||||
if (!is_shadow_present_pte(*it.sptep)) {
|
||||
table_gfn = gw->table_gfn[it.level - 2];
|
||||
access = gw->pt_access[it.level - 2];
|
||||
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
|
||||
false, access);
|
||||
sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
|
||||
it.level-1, false, access);
|
||||
/*
|
||||
* We must synchronize the pagetable before linking it
|
||||
* because the guest doesn't need to flush tlb when
|
||||
* the gpte is changed from non-present to present.
|
||||
* Otherwise, the guest may use the wrong mapping.
|
||||
*
|
||||
* For PG_LEVEL_4K, kvm_mmu_get_page() has already
|
||||
* synchronized it transiently via kvm_sync_page().
|
||||
*
|
||||
* For higher level pagetable, we synchronize it via
|
||||
* the slower mmu_sync_children(). If it needs to
|
||||
* break, some progress has been made; return
|
||||
* RET_PF_RETRY and retry on the next #PF.
|
||||
* KVM_REQ_MMU_SYNC is not necessary but it
|
||||
* expedites the process.
|
||||
*/
|
||||
if (sp->unsync_children &&
|
||||
mmu_sync_children(vcpu, sp, false))
|
||||
return RET_PF_RETRY;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
|
||||
* Using the cached information from sp->gfns is safe because:
|
||||
* - The spte has a reference to the struct page, so the pfn for a given gfn
|
||||
* can't change unless all sptes pointing to it are nuked first.
|
||||
*
|
||||
* Note:
|
||||
* We should flush all tlbs if spte is dropped even though guest is
|
||||
* responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
|
||||
* and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
|
||||
* used by guest then tlbs are not flushed, so guest is allowed to access the
|
||||
* freed pages.
|
||||
* And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
|
||||
*/
|
||||
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
{
|
||||
@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
return 0;
|
||||
|
||||
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
|
||||
/*
|
||||
* Update spte before increasing tlbs_dirty to make
|
||||
* sure no tlb flush is lost after spte is zapped; see
|
||||
* the comments in kvm_flush_remote_tlbs().
|
||||
*/
|
||||
smp_wmb();
|
||||
vcpu->kvm->tlbs_dirty++;
|
||||
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
|
||||
if (gfn != sp->gfns[i]) {
|
||||
drop_spte(vcpu->kvm, &sp->spt[i]);
|
||||
/*
|
||||
* The same as above where we are doing
|
||||
* prefetch_invalid_gpte().
|
||||
*/
|
||||
smp_wmb();
|
||||
vcpu->kvm->tlbs_dirty++;
|
||||
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
|
||||
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
|
||||
(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
|
||||
|
||||
svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
|
||||
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
|
||||
svm->vmcb->control.int_state = svm->nested.ctl.int_state;
|
||||
svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
|
||||
@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
|
||||
}
|
||||
|
||||
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
|
||||
struct vmcb *vmcb12)
|
||||
struct vmcb *vmcb12, bool from_vmrun)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
int ret;
|
||||
@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
|
||||
nested_vmcb02_prepare_save(svm, vmcb12);
|
||||
|
||||
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
|
||||
nested_npt_enabled(svm), true);
|
||||
nested_npt_enabled(svm), from_vmrun);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!npt_enabled)
|
||||
vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
|
||||
|
||||
if (!from_vmrun)
|
||||
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
|
||||
|
||||
svm_set_gif(svm, true);
|
||||
|
||||
return 0;
|
||||
@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
|
||||
|
||||
svm->nested.nested_run_pending = 1;
|
||||
|
||||
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
|
||||
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
|
||||
goto out_exit_err;
|
||||
|
||||
if (nested_svm_vmrun_msrpm(svm))
|
||||
|
@ -595,20 +595,12 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
|
||||
int *error)
|
||||
{
|
||||
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
|
||||
struct sev_data_launch_update_vmsa vmsa;
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i, ret;
|
||||
|
||||
if (!sev_es_guest(kvm))
|
||||
return -ENOTTY;
|
||||
|
||||
vmsa.reserved = 0;
|
||||
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
int ret;
|
||||
|
||||
/* Perform some pre-encryption checks against the VMSA */
|
||||
ret = sev_es_sync_vmsa(svm);
|
||||
@ -616,22 +608,37 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* The LAUNCH_UPDATE_VMSA command will perform in-place
|
||||
* encryption of the VMSA memory content (i.e it will write
|
||||
* the same memory region with the guest's key), so invalidate
|
||||
* it first.
|
||||
* The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
|
||||
* the VMSA memory content (i.e it will write the same memory region
|
||||
* with the guest's key), so invalidate it first.
|
||||
*/
|
||||
clflush_cache_range(svm->vmsa, PAGE_SIZE);
|
||||
|
||||
vmsa.handle = sev->handle;
|
||||
vmsa.reserved = 0;
|
||||
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
|
||||
vmsa.address = __sme_pa(svm->vmsa);
|
||||
vmsa.len = PAGE_SIZE;
|
||||
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
|
||||
&argp->error);
|
||||
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
|
||||
}
|
||||
|
||||
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i, ret;
|
||||
|
||||
if (!sev_es_guest(kvm))
|
||||
return -ENOTTY;
|
||||
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
ret = mutex_lock_killable(&vcpu->mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
svm->vcpu.arch.guest_state_protected = true;
|
||||
ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
|
||||
|
||||
mutex_unlock(&vcpu->mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1397,8 +1404,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
|
||||
/* Bind ASID to this guest */
|
||||
ret = sev_bind_asid(kvm, start.handle, error);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
sev_decommission(start.handle);
|
||||
goto e_free_session;
|
||||
}
|
||||
|
||||
params.handle = start.handle;
|
||||
if (copy_to_user((void __user *)(uintptr_t)argp->data,
|
||||
@ -1464,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
|
||||
/* Pin guest memory */
|
||||
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
|
||||
PAGE_SIZE, &n, 0);
|
||||
PAGE_SIZE, &n, 1);
|
||||
if (IS_ERR(guest_page)) {
|
||||
ret = PTR_ERR(guest_page);
|
||||
goto e_free_trans;
|
||||
@ -1501,6 +1510,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
|
||||
}
|
||||
|
||||
static bool cmd_allowed_from_miror(u32 cmd_id)
|
||||
{
|
||||
/*
|
||||
* Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
|
||||
* active mirror VMs. Also allow the debugging and status commands.
|
||||
*/
|
||||
if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
|
||||
cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
|
||||
cmd_id == KVM_SEV_DBG_ENCRYPT)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
|
||||
{
|
||||
struct kvm_sev_cmd sev_cmd;
|
||||
@ -1517,8 +1540,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
|
||||
/* enc_context_owner handles all memory enc operations */
|
||||
if (is_mirroring_enc_context(kvm)) {
|
||||
/* Only the enc_context_owner handles some memory enc operations. */
|
||||
if (is_mirroring_enc_context(kvm) &&
|
||||
!cmd_allowed_from_miror(sev_cmd.id)) {
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@ -1715,8 +1739,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
|
||||
{
|
||||
struct file *source_kvm_file;
|
||||
struct kvm *source_kvm;
|
||||
struct kvm_sev_info *mirror_sev;
|
||||
unsigned int asid;
|
||||
struct kvm_sev_info source_sev, *mirror_sev;
|
||||
int ret;
|
||||
|
||||
source_kvm_file = fget(source_fd);
|
||||
@ -1739,7 +1762,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
|
||||
goto e_source_unlock;
|
||||
}
|
||||
|
||||
asid = to_kvm_svm(source_kvm)->sev_info.asid;
|
||||
memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
|
||||
sizeof(source_sev));
|
||||
|
||||
/*
|
||||
* The mirror kvm holds an enc_context_owner ref so its asid can't
|
||||
@ -1759,8 +1783,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
|
||||
/* Set enc_context_owner and copy its encryption context over */
|
||||
mirror_sev = &to_kvm_svm(kvm)->sev_info;
|
||||
mirror_sev->enc_context_owner = source_kvm;
|
||||
mirror_sev->asid = asid;
|
||||
mirror_sev->active = true;
|
||||
mirror_sev->asid = source_sev.asid;
|
||||
mirror_sev->fd = source_sev.fd;
|
||||
mirror_sev->es_active = source_sev.es_active;
|
||||
mirror_sev->handle = source_sev.handle;
|
||||
/*
|
||||
* Do not copy ap_jump_table. Since the mirror does not share the same
|
||||
* KVM contexts as the original, and they may have different
|
||||
* memory-views.
|
||||
*/
|
||||
|
||||
mutex_unlock(&kvm->lock);
|
||||
return 0;
|
||||
|
@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
|
||||
|
||||
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
|
||||
V_IRQ_INJECTION_BITS_MASK;
|
||||
|
||||
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
|
||||
}
|
||||
|
||||
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
|
||||
@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
|
||||
if (error_code)
|
||||
goto reinject;
|
||||
|
||||
/* All SVM instructions expect page aligned RAX */
|
||||
if (svm->vmcb->save.rax & ~PAGE_MASK)
|
||||
goto reinject;
|
||||
|
||||
/* Decode the instruction for usage later */
|
||||
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
|
||||
goto reinject;
|
||||
@ -4285,7 +4291,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
|
||||
struct kvm_host_map map_save;
|
||||
int ret;
|
||||
|
||||
if (is_guest_mode(vcpu)) {
|
||||
if (!is_guest_mode(vcpu))
|
||||
return 0;
|
||||
|
||||
/* FED8h - SVM Guest */
|
||||
put_smstate(u64, smstate, 0x7ed8, 1);
|
||||
/* FEE0h - SVM Guest VMCB Physical Address */
|
||||
@ -4321,7 +4329,6 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
|
||||
&svm->vmcb01.ptr->save);
|
||||
|
||||
kvm_vcpu_unmap(vcpu, &map_save, true);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
struct kvm_host_map map, map_save;
|
||||
int ret = 0;
|
||||
|
||||
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
|
||||
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
|
||||
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
|
||||
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
|
||||
u64 saved_efer, vmcb12_gpa;
|
||||
struct vmcb *vmcb12;
|
||||
int ret;
|
||||
|
||||
if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
|
||||
return 0;
|
||||
|
||||
/* Non-zero if SMI arrived while vCPU was in guest mode. */
|
||||
if (!GET_SMSTATE(u64, smstate, 0x7ed8))
|
||||
return 0;
|
||||
|
||||
if (guest) {
|
||||
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
|
||||
return 1;
|
||||
|
||||
saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
|
||||
if (!(saved_efer & EFER_SVME))
|
||||
return 1;
|
||||
|
||||
if (kvm_vcpu_map(vcpu,
|
||||
gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
|
||||
vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
|
||||
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
|
||||
return 1;
|
||||
|
||||
ret = 1;
|
||||
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
|
||||
goto unmap_map;
|
||||
|
||||
if (svm_allocate_nested(svm))
|
||||
return 1;
|
||||
|
||||
vmcb12 = map.hva;
|
||||
|
||||
nested_load_control_from_vmcb12(svm, &vmcb12->control);
|
||||
|
||||
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
|
||||
kvm_vcpu_unmap(vcpu, &map, true);
|
||||
goto unmap_save;
|
||||
|
||||
/*
|
||||
* Restore L1 host state from L1 HSAVE area as VMCB01 was
|
||||
* used during SMM (see svm_enter_smm())
|
||||
*/
|
||||
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
|
||||
&map_save) == -EINVAL)
|
||||
return 1;
|
||||
|
||||
svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
|
||||
map_save.hva + 0x400);
|
||||
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
|
||||
|
||||
/*
|
||||
* Enter the nested guest now
|
||||
*/
|
||||
|
||||
vmcb12 = map.hva;
|
||||
nested_load_control_from_vmcb12(svm, &vmcb12->control);
|
||||
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
|
||||
|
||||
unmap_save:
|
||||
kvm_vcpu_unmap(vcpu, &map_save, true);
|
||||
}
|
||||
}
|
||||
|
||||
unmap_map:
|
||||
kvm_vcpu_unmap(vcpu, &map, true);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
|
||||
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
|
||||
}
|
||||
|
||||
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
|
||||
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
|
||||
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
|
||||
void svm_leave_nested(struct vcpu_svm *svm);
|
||||
void svm_free_nested(struct vcpu_svm *svm);
|
||||
int svm_allocate_nested(struct vcpu_svm *svm);
|
||||
|
@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
|
||||
switch (msr_index) {
|
||||
case MSR_IA32_VMX_EXIT_CTLS:
|
||||
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
|
||||
ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||
ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
|
||||
break;
|
||||
case MSR_IA32_VMX_ENTRY_CTLS:
|
||||
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
|
||||
ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
|
||||
ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
|
||||
break;
|
||||
case MSR_IA32_VMX_PROCBASED_CTLS2:
|
||||
ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
|
||||
ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
|
||||
break;
|
||||
case MSR_IA32_VMX_PINBASED_CTLS:
|
||||
ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
|
||||
break;
|
||||
case MSR_IA32_VMX_VMFUNC:
|
||||
ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
||||
* Guest state is invalid and unrestricted guest is disabled,
|
||||
* which means L1 attempted VMEntry to L2 with invalid state.
|
||||
* Fail the VMEntry.
|
||||
*
|
||||
* However when force loading the guest state (SMM exit or
|
||||
* loading nested state after migration, it is possible to
|
||||
* have invalid guest state now, which will be later fixed by
|
||||
* restoring L2 register state
|
||||
*/
|
||||
if (CC(!vmx_guest_state_valid(vcpu))) {
|
||||
if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
|
||||
*entry_failure_code = ENTRY_FAIL_DEFAULT;
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
||||
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
|
||||
vmcs12->vm_exit_msr_load_count))
|
||||
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
|
||||
|
||||
to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
|
||||
}
|
||||
|
||||
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
|
||||
@ -4899,14 +4906,7 @@ out_vmcs02:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Emulate the VMXON instruction.
|
||||
* Currently, we just remember that VMX is active, and do not save or even
|
||||
* inspect the argument to VMXON (the so-called "VMXON pointer") because we
|
||||
* do not currently need to store anything in that guest-allocated memory
|
||||
* region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
|
||||
* argument is different from the VMXON pointer (which the spec says they do).
|
||||
*/
|
||||
/* Emulate the VMXON instruction. */
|
||||
static int handle_vmon(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ret;
|
||||
@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
|
||||
case EXIT_REASON_VMFUNC:
|
||||
/* VM functions are emulated through L2->L0 vmexits. */
|
||||
return true;
|
||||
case EXIT_REASON_BUS_LOCK:
|
||||
/*
|
||||
* At present, bus lock VM exit is never exposed to L1.
|
||||
* Handle L2's bus locks in L0 directly.
|
||||
*/
|
||||
return true;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
vmx_prepare_switch_to_host(to_vmx(vcpu));
|
||||
}
|
||||
|
||||
static bool emulation_required(struct kvm_vcpu *vcpu)
|
||||
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
|
||||
}
|
||||
@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
|
||||
vmcs_writel(GUEST_RFLAGS, rflags);
|
||||
|
||||
if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
|
||||
vmx->emulation_required = emulation_required(vcpu);
|
||||
vmx->emulation_required = vmx_emulation_required(vcpu);
|
||||
}
|
||||
|
||||
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
|
||||
@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
&msr_info->data))
|
||||
return 1;
|
||||
/*
|
||||
* Enlightened VMCS v1 doesn't have certain fields, but buggy
|
||||
* Hyper-V versions are still trying to use corresponding
|
||||
* features when they are exposed. Filter out the essential
|
||||
* minimum.
|
||||
* Enlightened VMCS v1 doesn't have certain VMCS fields but
|
||||
* instead of just ignoring the features, different Hyper-V
|
||||
* versions are either trying to use them and fail or do some
|
||||
* sanity checking and refuse to boot. Filter all unsupported
|
||||
* features out.
|
||||
*/
|
||||
if (!msr_info->host_initiated &&
|
||||
vmx->nested.enlightened_vmcs_enabled)
|
||||
@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||
}
|
||||
|
||||
/* depends on vcpu->arch.cr0 to be set to a new value */
|
||||
vmx->emulation_required = emulation_required(vcpu);
|
||||
vmx->emulation_required = vmx_emulation_required(vcpu);
|
||||
}
|
||||
|
||||
static int vmx_get_max_tdp_level(void)
|
||||
@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
|
||||
{
|
||||
__vmx_set_segment(vcpu, var, seg);
|
||||
|
||||
to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
|
||||
to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
|
||||
}
|
||||
|
||||
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
|
||||
@ -6621,10 +6622,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
vmx->loaded_vmcs->soft_vnmi_blocked))
|
||||
vmx->loaded_vmcs->entry_time = ktime_get();
|
||||
|
||||
/* Don't enter VMX if guest state is invalid, let the exit handler
|
||||
start emulation until we arrive back to a valid state */
|
||||
if (vmx->emulation_required)
|
||||
/*
|
||||
* Don't enter VMX if guest state is invalid, let the exit handler
|
||||
* start emulation until we arrive back to a valid state. Synthesize a
|
||||
* consistency check VM-Exit due to invalid guest state and bail.
|
||||
*/
|
||||
if (unlikely(vmx->emulation_required)) {
|
||||
|
||||
/* We don't emulate invalid state of a nested guest */
|
||||
vmx->fail = is_guest_mode(vcpu);
|
||||
|
||||
vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
|
||||
vmx->exit_reason.failed_vmentry = 1;
|
||||
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
|
||||
vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
|
||||
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
|
||||
vmx->exit_intr_info = 0;
|
||||
return EXIT_FASTPATH_NONE;
|
||||
}
|
||||
|
||||
trace_kvm_entry(vcpu);
|
||||
|
||||
|
@ -248,12 +248,8 @@ struct vcpu_vmx {
|
||||
* only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
|
||||
* of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
|
||||
* be loaded into hardware if those conditions aren't met.
|
||||
* nr_active_uret_msrs tracks the number of MSRs that need to be loaded
|
||||
* into hardware when running the guest. guest_uret_msrs[] is resorted
|
||||
* whenever the number of "active" uret MSRs is modified.
|
||||
*/
|
||||
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
|
||||
int nr_active_uret_msrs;
|
||||
bool guest_uret_msrs_loaded;
|
||||
#ifdef CONFIG_X86_64
|
||||
u64 msr_host_kernel_gs_base;
|
||||
@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
|
||||
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
|
||||
unsigned long fs_base, unsigned long gs_base);
|
||||
int vmx_get_cpl(struct kvm_vcpu *vcpu);
|
||||
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
|
||||
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
|
||||
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
|
||||
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
|
||||
|
@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
|
||||
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
|
||||
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
|
||||
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
|
||||
|
||||
MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
|
||||
MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
|
||||
MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
|
||||
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
|
||||
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
|
||||
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
|
||||
};
|
||||
|
||||
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
|
||||
@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
||||
offsetof(struct compat_vcpu_info, time));
|
||||
if (vcpu->xen.vcpu_time_info_set)
|
||||
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
|
||||
if (v == kvm_get_vcpu(v->kvm, 0))
|
||||
if (!v->vcpu_idx)
|
||||
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
|
||||
return 0;
|
||||
}
|
||||
@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
|
||||
|
||||
/* Process a latched INIT or SMI, if any. */
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
|
||||
/*
|
||||
* Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
|
||||
* on SMM exit we still need to reload them from
|
||||
* guest memory
|
||||
*/
|
||||
vcpu->arch.pdptrs_from_userspace = false;
|
||||
}
|
||||
|
||||
kvm_mmu_reset_context(vcpu);
|
||||
@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
int r;
|
||||
|
||||
vcpu->arch.last_vmentry_cpu = -1;
|
||||
vcpu->arch.regs_avail = ~0;
|
||||
vcpu->arch.regs_dirty = ~0;
|
||||
|
||||
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
|
||||
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
||||
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
|
||||
kvm_rip_write(vcpu, 0xfff0);
|
||||
|
||||
vcpu->arch.cr3 = 0;
|
||||
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
|
||||
|
||||
/*
|
||||
* CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
|
||||
* of Intel's SDM list CD/NW as being set on INIT, but they contradict
|
||||
@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
|
||||
|
||||
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (type)
|
||||
return -EINVAL;
|
||||
|
||||
ret = kvm_page_track_init(kvm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
|
||||
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
|
||||
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
|
||||
@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
|
||||
kvm_apicv_init(kvm);
|
||||
kvm_hv_init_vm(kvm);
|
||||
kvm_page_track_init(kvm);
|
||||
kvm_mmu_init_vm(kvm);
|
||||
kvm_xen_init_vm(kvm);
|
||||
|
||||
|
@@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)

#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })

#define __peek_nbyte_next(t, insn, n) \
({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })

#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
|
||||
|
||||
static noinline void
|
||||
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
|
||||
unsigned long address, int signal, int si_code)
|
||||
unsigned long address, int signal, int si_code,
|
||||
u32 pkey)
|
||||
{
|
||||
WARN_ON_ONCE(user_mode(regs));
|
||||
|
||||
@ -735,9 +736,13 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
|
||||
|
||||
set_signal_archinfo(address, error_code);
|
||||
|
||||
if (si_code == SEGV_PKUERR) {
|
||||
force_sig_pkuerr((void __user *)address, pkey);
|
||||
} else {
|
||||
/* XXX: hwpoison faults will set the wrong code. */
|
||||
force_sig_fault(signal, si_code, (void __user *)address);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Barring that, we can do the fixup and be happy.
|
||||
@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
if (!user_mode(regs)) {
|
||||
kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
|
||||
kernelmode_fixup_or_oops(regs, error_code, address,
|
||||
SIGSEGV, si_code, pkey);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
|
||||
{
|
||||
/* Kernel mode? Handle exceptions or die: */
|
||||
if (!user_mode(regs)) {
|
||||
kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
|
||||
kernelmode_fixup_or_oops(regs, error_code, address,
|
||||
SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1396,7 +1403,8 @@ good_area:
|
||||
*/
|
||||
if (!user_mode(regs))
|
||||
kernelmode_fixup_or_oops(regs, error_code, address,
|
||||
SIGBUS, BUS_ADRERR);
|
||||
SIGBUS, BUS_ADRERR,
|
||||
ARCH_DEFAULT_PKEY);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1416,7 +1424,8 @@ good_area:
|
||||
return;
|
||||
|
||||
if (fatal_signal_pending(current) && !user_mode(regs)) {
|
||||
kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
|
||||
kernelmode_fixup_or_oops(regs, error_code, address,
|
||||
0, 0, ARCH_DEFAULT_PKEY);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1424,7 +1433,8 @@ good_area:
|
||||
/* Kernel mode? Handle exceptions or die: */
|
||||
if (!user_mode(regs)) {
|
||||
kernelmode_fixup_or_oops(regs, error_code, address,
|
||||
SIGSEGV, SEGV_MAPERR);
|
||||
SIGSEGV, SEGV_MAPERR,
|
||||
ARCH_DEFAULT_PKEY);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1334,9 +1334,10 @@ st: if (is_imm8(insn->off))
|
||||
if (insn->imm == (BPF_AND | BPF_FETCH) ||
|
||||
insn->imm == (BPF_OR | BPF_FETCH) ||
|
||||
insn->imm == (BPF_XOR | BPF_FETCH)) {
|
||||
u8 *branch_target;
|
||||
bool is64 = BPF_SIZE(insn->code) == BPF_DW;
|
||||
u32 real_src_reg = src_reg;
|
||||
u32 real_dst_reg = dst_reg;
|
||||
u8 *branch_target;
|
||||
|
||||
/*
|
||||
* Can't be implemented with a single x86 insn.
|
||||
@ -1347,11 +1348,13 @@ st: if (is_imm8(insn->off))
|
||||
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
|
||||
if (src_reg == BPF_REG_0)
|
||||
real_src_reg = BPF_REG_AX;
|
||||
if (dst_reg == BPF_REG_0)
|
||||
real_dst_reg = BPF_REG_AX;
|
||||
|
||||
branch_target = prog;
|
||||
/* Load old value */
|
||||
emit_ldx(&prog, BPF_SIZE(insn->code),
|
||||
BPF_REG_0, dst_reg, insn->off);
|
||||
BPF_REG_0, real_dst_reg, insn->off);
|
||||
/*
|
||||
* Perform the (commutative) operation locally,
|
||||
* put the result in the AUX_REG.
|
||||
@ -1362,7 +1365,8 @@ st: if (is_imm8(insn->off))
|
||||
add_2reg(0xC0, AUX_REG, real_src_reg));
|
||||
/* Attempt to swap in new value */
|
||||
err = emit_atomic(&prog, BPF_CMPXCHG,
|
||||
dst_reg, AUX_REG, insn->off,
|
||||
real_dst_reg, AUX_REG,
|
||||
insn->off,
|
||||
BPF_SIZE(insn->code));
|
||||
if (WARN_ON(err))
|
||||
return err;
|
||||
@ -1376,7 +1380,6 @@ st: if (is_imm8(insn->off))
|
||||
/* Restore R0 after clobbering RAX */
|
||||
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
|
||||
@ -1737,7 +1740,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
|
||||
}
|
||||
|
||||
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
struct bpf_prog *p, int stack_size, bool mod_ret)
|
||||
struct bpf_prog *p, int stack_size, bool save_ret)
|
||||
{
|
||||
u8 *prog = *pprog;
|
||||
u8 *jmp_insn;
|
||||
@ -1770,11 +1773,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
|
||||
if (emit_call(&prog, p->bpf_func, prog))
|
||||
return -EINVAL;
|
||||
|
||||
/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
|
||||
/*
|
||||
* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
|
||||
* of the previous call which is then passed on the stack to
|
||||
* the next BPF program.
|
||||
*
|
||||
* BPF_TRAMP_FENTRY trampoline may need to return the return
|
||||
* value of BPF_PROG_TYPE_STRUCT_OPS prog.
|
||||
*/
|
||||
if (mod_ret)
|
||||
if (save_ret)
|
||||
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
|
||||
|
||||
/* replace 2 nops with JE insn, since jmp target is known */
|
||||
@ -1821,13 +1828,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
|
||||
}
|
||||
|
||||
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
|
||||
struct bpf_tramp_progs *tp, int stack_size)
|
||||
struct bpf_tramp_progs *tp, int stack_size,
|
||||
bool save_ret)
|
||||
{
|
||||
int i;
|
||||
u8 *prog = *pprog;
|
||||
|
||||
for (i = 0; i < tp->nr_progs; i++) {
|
||||
if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
|
||||
if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
|
||||
save_ret))
|
||||
return -EINVAL;
|
||||
}
|
||||
*pprog = prog;
|
||||
@ -1870,6 +1879,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool is_valid_bpf_tramp_flags(unsigned int flags)
|
||||
{
|
||||
if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
|
||||
(flags & BPF_TRAMP_F_SKIP_FRAME))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
|
||||
* and it must be used alone.
|
||||
*/
|
||||
if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
|
||||
(flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Example:
|
||||
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
* its 'struct btf_func_model' will be nr_args=2
|
||||
@ -1942,17 +1968,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
|
||||
u8 **branches = NULL;
|
||||
u8 *prog;
|
||||
bool save_ret;
|
||||
|
||||
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
|
||||
if (nr_args > 6)
|
||||
return -ENOTSUPP;
|
||||
|
||||
if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
|
||||
(flags & BPF_TRAMP_F_SKIP_FRAME))
|
||||
if (!is_valid_bpf_tramp_flags(flags))
|
||||
return -EINVAL;
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG)
|
||||
stack_size += 8; /* room for return value of orig_call */
|
||||
/* room for return value of orig_call or fentry prog */
|
||||
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
|
||||
if (save_ret)
|
||||
stack_size += 8;
|
||||
|
||||
if (flags & BPF_TRAMP_F_IP_ARG)
|
||||
stack_size += 8; /* room for IP address argument */
|
||||
@ -1998,7 +2026,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
}
|
||||
|
||||
if (fentry->nr_progs)
|
||||
if (invoke_bpf(m, &prog, fentry, stack_size))
|
||||
if (invoke_bpf(m, &prog, fentry, stack_size,
|
||||
flags & BPF_TRAMP_F_RET_FENTRY_RET))
|
||||
return -EINVAL;
|
||||
|
||||
if (fmod_ret->nr_progs) {
|
||||
@ -2045,7 +2074,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
}
|
||||
|
||||
if (fexit->nr_progs)
|
||||
if (invoke_bpf(m, &prog, fexit, stack_size)) {
|
||||
if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
@ -2065,9 +2094,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
/* restore original return value back into RAX */
|
||||
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
|
||||
}
|
||||
/* restore return value of orig_call or fentry prog back into RAX */
|
||||
if (save_ret)
|
||||
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
|
||||
|
||||
EMIT1(0x5B); /* pop rbx */
|
||||
EMIT1(0xC9); /* leave */
|
||||
|
@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static void xen_convert_trap_info(const struct desc_ptr *desc,
|
||||
struct trap_info *traps)
|
||||
static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
|
||||
struct trap_info *traps, bool full)
|
||||
{
|
||||
unsigned in, out, count;
|
||||
|
||||
@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
|
||||
for (in = out = 0; in < count; in++) {
|
||||
gate_desc *entry = (gate_desc *)(desc->address) + in;
|
||||
|
||||
if (cvt_gate_to_trap(in, entry, &traps[out]))
|
||||
if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
|
||||
out++;
|
||||
}
|
||||
traps[out].address = 0;
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
void xen_copy_trap_info(struct trap_info *traps)
|
||||
{
|
||||
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
|
||||
|
||||
xen_convert_trap_info(desc, traps);
|
||||
xen_convert_trap_info(desc, traps, true);
|
||||
}
|
||||
|
||||
/* Load a new IDT into Xen. In principle this can be per-CPU, so we
|
||||
@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
|
||||
{
|
||||
static DEFINE_SPINLOCK(lock);
|
||||
static struct trap_info traps[257];
|
||||
unsigned out;
|
||||
|
||||
trace_xen_cpu_load_idt(desc);
|
||||
|
||||
@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
|
||||
|
||||
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
|
||||
|
||||
xen_convert_trap_info(desc, traps);
|
||||
out = xen_convert_trap_info(desc, traps, false);
|
||||
memset(&traps[out], 0, sizeof(traps[0]));
|
||||
|
||||
xen_mc_flush();
|
||||
if (HYPERVISOR_set_trap_table(traps))
|
||||
|
@ -18,7 +18,7 @@
|
||||
#endif
|
||||
#include <linux/export.h>
|
||||
|
||||
int xen_swiotlb __read_mostly;
|
||||
static int xen_swiotlb __read_mostly;
|
||||
|
||||
/*
|
||||
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
|
||||
@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
|
||||
return xen_swiotlb;
|
||||
}
|
||||
|
||||
void __init pci_xen_swiotlb_init(void)
|
||||
static void __init pci_xen_swiotlb_init(void)
|
||||
{
|
||||
if (xen_swiotlb) {
|
||||
xen_swiotlb_init_early();
|
||||
|
@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
gdt = get_cpu_gdt_rw(cpu);
|
||||
|
||||
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
|
||||
|
||||
/*
|
||||
* Bring up the CPU in cpu_bringup_and_idle() with the stack
|
||||
* pointing just below where pt_regs would be if it were a normal
|
||||
@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
xen_copy_trap_info(ctxt->trap_ctxt);
|
||||
|
||||
ctxt->ldt_ents = 0;
|
||||
|
||||
BUG_ON((unsigned long)gdt & ~PAGE_MASK);
|
||||
|
||||
gdt_mfn = arbitrary_virt_to_mfn(gdt);
|
||||
|
@ -1466,7 +1466,7 @@ again:
|
||||
if (!bio_integrity_endio(bio))
|
||||
return;
|
||||
|
||||
if (bio->bi_bdev)
|
||||
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
|
||||
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
|
||||
|
||||
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
|
||||
|
23 block/bsg.c
@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
|
||||
.llseek = default_llseek,
|
||||
};
|
||||
|
||||
static void bsg_device_release(struct device *dev)
|
||||
{
|
||||
struct bsg_device *bd = container_of(dev, struct bsg_device, device);
|
||||
|
||||
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
|
||||
kfree(bd);
|
||||
}
|
||||
|
||||
void bsg_unregister_queue(struct bsg_device *bd)
|
||||
{
|
||||
if (bd->queue->kobj.sd)
|
||||
sysfs_remove_link(&bd->queue->kobj, "bsg");
|
||||
cdev_device_del(&bd->cdev, &bd->device);
|
||||
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
|
||||
kfree(bd);
|
||||
put_device(&bd->device);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
|
||||
|
||||
@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
|
||||
if (ret < 0) {
|
||||
if (ret == -ENOSPC)
|
||||
dev_err(parent, "bsg: too many bsg devices\n");
|
||||
goto out_kfree;
|
||||
kfree(bd);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
bd->device.devt = MKDEV(bsg_major, ret);
|
||||
bd->device.class = bsg_class;
|
||||
bd->device.parent = parent;
|
||||
bd->device.release = bsg_device_release;
|
||||
dev_set_name(&bd->device, "%s", name);
|
||||
device_initialize(&bd->device);
|
||||
|
||||
@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
|
||||
bd->cdev.owner = THIS_MODULE;
|
||||
ret = cdev_device_add(&bd->cdev, &bd->device);
|
||||
if (ret)
|
||||
goto out_ida_remove;
|
||||
goto out_put_device;
|
||||
|
||||
if (q->kobj.sd) {
|
||||
ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
|
||||
@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
|
||||
|
||||
out_device_del:
|
||||
cdev_device_del(&bd->cdev, &bd->device);
|
||||
out_ida_remove:
|
||||
ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
|
||||
out_kfree:
|
||||
kfree(bd);
|
||||
out_put_device:
|
||||
put_device(&bd->device);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bsg_register_queue);
|
||||
|
21 block/fops.c
@@ -14,6 +14,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <linux/suspend.h>
+#include <linux/fs.h>
 #include "blk.h"
 
 static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 			     loff_t len)
 {
-	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+	struct inode *inode = bdev_file_inode(file);
+	struct block_device *bdev = I_BDEV(inode);
 	loff_t end = start + len - 1;
 	loff_t isize;
 	int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
+	filemap_invalidate_lock(inode->i_mapping);
+
 	/* Invalidate the page cache, including dirty pages. */
 	error = truncate_bdev_range(bdev, file->f_mode, start, end);
 	if (error)
-		return error;
+		goto fail;
 
 	switch (mode) {
 	case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 					     GFP_KERNEL, 0);
 		break;
 	default:
-		return -EOPNOTSUPP;
+		error = -EOPNOTSUPP;
 	}
-	if (error)
-		return error;
 
-	/*
-	 * Invalidate the page cache again; if someone wandered in and dirtied
-	 * a page, we just discard it - userspace has no way of knowing whether
-	 * the write happened before or after discard completing...
-	 */
-	return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+	filemap_invalidate_unlock(inode->i_mapping);
+	return error;
 }
 
 const struct file_operations def_blk_fops = {
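A note on the block/fops.c hunks above: once filemap_invalidate_lock() is taken, every early return in blkdev_fallocate() becomes a goto to a single fail label that drops the lock. A small self-contained sketch of that single-unlock-exit shape follows, using a pthread mutex instead of the kernel lock; punch_range() and range_lock are hypothetical names, not kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helper: every error path after the lock is taken jumps to
 * the single "fail" label, so the unlock cannot be missed. */
static int punch_range(long start, long len)
{
	int error = 0;

	if (start < 0 || len <= 0)
		return -EINVAL;		/* not yet locked: plain return is fine */

	pthread_mutex_lock(&range_lock);

	if (start % 512 || len % 512) {
		error = -EINVAL;	/* locked: go through the common exit */
		goto fail;
	}

	printf("punching [%ld, %ld)\n", start, start + len);

fail:
	pthread_mutex_unlock(&range_lock);
	return error;
}

int main(void)
{
	printf("aligned:   %d\n", punch_range(0, 4096));
	printf("unaligned: %d\n", punch_range(0, 100));
	return 0;
}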
@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 #define should_use_kmap(pfn)   page_is_ram(pfn)
 #endif
 
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
-			      bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 {
 	unsigned long pfn;
 
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 			return NULL;
 		return (void __iomem __force *)kmap(pfn_to_page(pfn));
 	} else
-		return memory ? acpi_os_memmap(pg_off, pg_sz) :
-				acpi_os_ioremap(pg_off, pg_sz);
+		return acpi_os_ioremap(pg_off, pg_sz);
 }
 
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 
 /**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
  * @phys: Start of the physical address range to map.
  * @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
  *
  * Look up the given physical address range in the list of existing ACPI memory
  * mappings. If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map;
 	void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = acpi_map(phys, size, memory);
+	virt = acpi_map(phys, size);
 	if (!virt) {
 		mutex_unlock(&acpi_ioremap_lock);
 		kfree(map);
@@ -375,17 +372,11 @@ out:
 	mutex_unlock(&acpi_ioremap_lock);
 	return map->virt + (phys - map->phys);
 }
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
-	return __acpi_os_map_iomem(phys, size, false);
-}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-	return (void *)__acpi_os_map_iomem(phys, size, true);
+	return (void *)acpi_os_map_iomem(phys, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+					      struct binder_thread *thread,
 					      struct binder_buffer *buffer,
 					      binder_size_t failed_at,
 					      bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 						&proc->alloc, &fd, buffer,
 						offset, sizeof(fd));
 				WARN_ON(err);
-				if (!err)
+				if (!err) {
 					binder_deferred_fd_close(fd);
+					/*
+					 * Need to make sure the thread goes
+					 * back to userspace to complete the
+					 * deferred close
+					 */
+					if (thread)
+						thread->looper_need_return = true;
+				}
 			}
 		} break;
 		default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
 	if (reply) {
 		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
-		if (target_thread->is_dead || target_proc->is_frozen) {
-			return_error = target_thread->is_dead ?
-				BR_DEAD_REPLY : BR_FROZEN_REPLY;
+		if (target_thread->is_dead) {
+			return_error = BR_DEAD_REPLY;
 			binder_inner_proc_unlock(target_proc);
 			goto err_dead_proc_or_thread;
 		}
@@ -3105,7 +3113,7 @@ err_bad_parent:
 err_copy_data_failed:
 	binder_free_txn_fixups(t);
 	trace_binder_transaction_failed_buffer_release(t->buffer);
-	binder_transaction_buffer_release(target_proc, t->buffer,
+	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
 					  buffer_offset, true);
 	if (target_node)
 		binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
  * Cleanup buffer and free it.
  */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+		struct binder_thread *thread,
+		struct binder_buffer *buffer)
 {
 	binder_inner_proc_lock(proc);
 	if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
 		binder_node_inner_unlock(buf_node);
 	}
 	trace_binder_transaction_buffer_release(buffer);
-	binder_transaction_buffer_release(proc, buffer, 0, false);
+	binder_transaction_buffer_release(proc, thread, buffer, 0, false);
 	binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				     proc->pid, thread->pid, (u64)data_ptr,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer);
 			break;
 		}
 
@@ -4107,7 +4117,7 @@ retry:
 			buffer->transaction = NULL;
 			binder_cleanup_transaction(t, "fd fixups failed",
 						   BR_FAILED_REPLY);
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer);
 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
 				     proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 	return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	if (proc->outstanding_txns > 0)
+		return true;
+
+	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->transaction_stack)
+			return true;
+	}
+	return false;
+}
+
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
 			       struct binder_proc *target_proc)
 {
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
 					 (!target_proc->outstanding_txns),
 					 msecs_to_jiffies(info->timeout_ms));
 
-	if (!ret && target_proc->outstanding_txns)
-		ret = -EAGAIN;
+	/* Check pending transactions that wait for reply */
+	if (ret >= 0) {
+		binder_inner_proc_lock(target_proc);
+		if (binder_txns_pending_ilocked(target_proc))
+			ret = -EAGAIN;
+		binder_inner_proc_unlock(target_proc);
+	}
 
 	if (ret < 0) {
 		binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
 {
 	struct binder_proc *target_proc;
 	bool found = false;
+	__u32 txns_pending;
 
 	info->sync_recv = 0;
 	info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
 		if (target_proc->pid == info->pid) {
 			found = true;
 			binder_inner_proc_lock(target_proc);
-			info->sync_recv |= target_proc->sync_recv;
+			txns_pending = binder_txns_pending_ilocked(target_proc);
+			info->sync_recv |= target_proc->sync_recv |
+					(txns_pending << 1);
 			info->async_recv |= target_proc->async_recv;
 			binder_inner_proc_unlock(target_proc);
 		}
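A note on the binder.c hunks above: binder_ioctl_freeze() no longer trusts the wait result alone; it re-checks for pending transactions under the inner lock via binder_txns_pending_ilocked() and returns -EAGAIN if any remain. A minimal userspace sketch of that wait-then-recheck-under-lock shape follows; it uses a pthread mutex and hypothetical names (freeze, txns_pending_locked), not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int outstanding_txns;

/* Caller must hold "lock", mirroring the _ilocked naming convention. */
static bool txns_pending_locked(void)
{
	return outstanding_txns > 0;
}

static int freeze(void)
{
	int ret = 0;

	/* ... wait for outstanding work to drain (elided) ... */

	/* Re-check the pending state under the lock after the wait. */
	pthread_mutex_lock(&lock);
	if (txns_pending_locked())
		ret = -1;		/* would be -EAGAIN in the kernel code */
	pthread_mutex_unlock(&lock);

	return ret;
}

int main(void)
{
	outstanding_txns = 1;
	printf("freeze with pending txns: %d\n", freeze());
	outstanding_txns = 0;
	printf("freeze when drained:      %d\n", freeze());
	return 0;
}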
@@ -378,6 +378,8 @@ struct binder_ref {
  *                        binder transactions
  *                        (protected by @inner_lock)
  * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
  *                        (protected by @inner_lock)
  * @async_recv:           process received async transactions since last frozen
  *                        (protected by @inner_lock)
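A note on the binder_internal.h hunk above: sync_recv now carries two flags, bit 0 for a sync transaction received while frozen and bit 1 for sync transactions still pending, which is why the binder.c hunk ORs in (txns_pending << 1). A tiny standalone sketch of that two-bit packing, with hypothetical macro names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the two-bit encoding documented above:
 * bit 0 - a sync transaction was received while frozen,
 * bit 1 - sync transactions were still pending when the freeze completed. */
#define SYNC_RECV_RECEIVED	(1u << 0)
#define SYNC_RECV_TXNS_PENDING	(1u << 1)

static unsigned int pack_sync_recv(bool received, bool txns_pending)
{
	return (received ? SYNC_RECV_RECEIVED : 0) |
	       ((unsigned int)txns_pending << 1);
}

int main(void)
{
	unsigned int v = pack_sync_recv(true, true);

	printf("received=%d pending=%d\n",
	       !!(v & SYNC_RECV_RECEIVED), !!(v & SYNC_RECV_TXNS_PENDING));
	return 0;
}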
@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
 	to_swnode(fwnode)->managed = true;
 	set_secondary_fwnode(dev, fwnode);
 
+	if (device_is_registered(dev))
+		software_node_notify(dev);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(device_create_managed_software_node);
@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
 	mutex_lock(&dev->mutex);
 	rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
 	mutex_unlock(&dev->mutex);
+	kfree(insns);
 	return rc;
 }
 
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 				   in_place ? DMA_BIDIRECTIONAL
 					    : DMA_TO_DEVICE);
 	if (ret)
-		goto e_ctx;
+		goto e_aad;
 
 	if (in_place) {
 		dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	op.u.aes.size = 0;
 	ret = cmd_q->ccp->vdata->perform->aes(&op);
 	if (ret)
-		goto e_dst;
+		goto e_final_wa;
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
-			goto e_tag;
+			goto e_final_wa;
 		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-		if (ret)
-			goto e_tag;
+		if (ret) {
+			ccp_dm_free(&tag);
+			goto e_final_wa;
+		}
 
 		ret = crypto_memneq(tag.address, final_wa.address,
 				    authsize) ? -EBADMSG : 0;
 		ccp_dm_free(&tag);
 	}
 
-e_tag:
+e_final_wa:
 	ccp_dm_free(&final_wa);
 
 e_dst:
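A note on the ccp-ops.c hunks above: the error gotos are retargeted so each failure path unwinds exactly the work areas that have already been set up (the new e_final_wa label replaces e_tag). A generic, self-contained sketch of that cascading-cleanup-label idiom follows, using plain malloc()/free() and hypothetical label names in the spirit of e_ctx/e_aad; it is not the driver code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sketch: each goto target unwinds only what was already
 * allocated, and the success path falls through the same labels. */
static int do_operation(void)
{
	char *ctx, *aad, *dst;
	int ret = -1;

	ctx = malloc(64);
	if (!ctx)
		return -1;

	aad = malloc(64);
	if (!aad)
		goto e_ctx;

	dst = malloc(64);
	if (!dst)
		goto e_aad;

	printf("operation ran\n");
	ret = 0;

	free(dst);
e_aad:
	free(aad);
e_ctx:
	free(ctx);
	return ret;
}

int main(void)
{
	return do_operation() ? 1 : 0;
}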
@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
 			dimm->grain = pvt->mem_width_in_bytes;
 			dimm->dtype = dt;
 			dimm->mtype = mt;
-			dimm->edac_mode = EDAC_FLAG_SECDED;
+			dimm->edac_mode = EDAC_SECDED;
 			dimm->nr_pages = pages_per_rank / csi->nr_channels;
 		}
 	}
@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
 
 		for (j = 0; j < csi->nr_channels; j++) {
 			dimm = csi->channels[j]->dimm;
-			dimm->edac_mode = EDAC_FLAG_SECDED;
+			dimm->edac_mode = EDAC_SECDED;
 			dimm->mtype = p_data->get_mtype(priv->baseaddr);
 			dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
 			dimm->grain = SYNPS_EDAC_ERR_GRAIN;
@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
 {
 	unsigned int irq_base, nr_irqs;
 	struct dfl_feature_info *finfo;
+	u8 revision = 0;
 	int ret;
-	u8 revision;
 	u64 v;
 
-	v = readq(binfo->ioaddr + ofst);
-	revision = FIELD_GET(DFH_REVISION, v);
+	if (fid != FEATURE_ID_AFU) {
+		v = readq(binfo->ioaddr + ofst);
+		revision = FIELD_GET(DFH_REVISION, v);
 
-	/* read feature size and id if inputs are invalid */
-	size = size ? size : feature_size(v);
-	fid = fid ? fid : feature_id(v);
+		/* read feature size and id if inputs are invalid */
+		size = size ? size : feature_size(v);
+		fid = fid ? fid : feature_id(v);
+	}
 
 	if (binfo->len - ofst < size)
 		return -EINVAL;
@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
 		goto fail;
 
 	get_status(spi, &status);
-	if (test_bit(FAIL, &status))
+	if (test_bit(FAIL, &status)) {
+		ret = -EINVAL;
 		goto fail;
+	}
 	dump_status_reg(&status);
 
 	spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
 	dump_status_reg(&status);
 	if (!test_bit(DONE, &status)) {
 		machxo2_cleanup(mgr);
+		ret = -EINVAL;
 		goto fail;
 	}
 
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
 			break;
 		if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
 			machxo2_cleanup(mgr);
+			ret = -EINVAL;
 			goto fail;
 		}
 	} while (1);
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
 		reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
 		for_each_set_bit(p, &reg, 32)
-			generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+			generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
 	}
 
 	chained_irq_exit(ic, desc);
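A note on the gpio-aspeed-sgpio hunk above: status bit p in bank i now maps to hwirq i * 32 + p * 2, since each SGPIO pair exposes an input and an output line on adjacent hwirq numbers and only the input line raises interrupts. A tiny standalone sketch of that mapping follows (hypothetical values, not driver code):

#include <stdio.h>

/* Hypothetical userspace sketch of the mapping fixed above: status bit p in
 * bank i corresponds to hwirq i * 32 + p * 2. */
int main(void)
{
	unsigned int bank = 1;
	unsigned long status = 0x5;	/* bits 0 and 2 pending */

	for (unsigned int p = 0; p < 32; p++) {
		if (status & (1ul << p))
			printf("bit %u -> hwirq %u\n", p, bank * 32 + p * 2);
	}
	return 0;
}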
@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 	mutex_lock(&chip->i2c_lock);
 	ret = regmap_read(chip->regmap, inreg, &reg_val);
 	mutex_unlock(&chip->i2c_lock);
-	if (ret < 0) {
-		/*
-		 * NOTE:
-		 * diagnostic already emitted; that's all we should
-		 * do unless gpio_*_value_cansleep() calls become different
-		 * from their nonsleeping siblings (and report faults).
-		 */
-		return 0;
-	}
+	if (ret < 0)
+		return ret;
 
 	return !!(reg_val & bit);
 }
@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
 	u32 data;
 
 	data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
-	if (data & BIT(offset))
+	if (data)
 		return GPIO_LINE_DIRECTION_OUT;
 
 	return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
 	unsigned int cur_div_reg;
 	u64 div;
 
-	if (!IS_ERR(bank->db_clk)) {
+	if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
 		div_debounce_support = true;
 		freq = clk_get_rate(bank->db_clk);
 		max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 	struct device_node *pctlnp = of_get_parent(np);
 	struct pinctrl_dev *pctldev = NULL;
 	struct rockchip_pin_bank *bank = NULL;
+	struct rockchip_pin_output_deferred *cfg;
 	static int gpio;
 	int id, ret;
 
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	/*
+	 * Prevent clashes with a deferred output setting
+	 * being added right at this moment.
+	 */
+	mutex_lock(&bank->deferred_lock);
+
 	ret = rockchip_gpiolib_register(bank);
 	if (ret) {
 		clk_disable_unprepare(bank->clk);
+		mutex_unlock(&bank->deferred_lock);
 		return ret;
 	}
 
+	while (!list_empty(&bank->deferred_output)) {
+		cfg = list_first_entry(&bank->deferred_output,
+				       struct rockchip_pin_output_deferred, head);
+		list_del(&cfg->head);
+
+		ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+		if (ret)
+			dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+		kfree(cfg);
+	}
+
+	mutex_unlock(&bank->deferred_lock);
+
 	platform_set_drvdata(pdev, bank);
 	dev_info(dev, "probed %pOF\n", np);
 
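A note on the gpio-rockchip probe hunk above: output settings that were deferred before the gpiochip existed are replayed and freed while holding the same deferred_lock that guards new additions, closing the race the added comment describes. A minimal userspace sketch of draining such a deferred list under a mutex follows; the list, lock and function names are hypothetical, not the driver API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sketch: requests queued before the real consumer existed are
 * replayed under the same lock that guards new additions, then freed. */
struct deferred {
	unsigned int pin, arg;
	struct deferred *next;
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *deferred_head;

static void queue_deferred(unsigned int pin, unsigned int arg)
{
	struct deferred *d = malloc(sizeof(*d));

	if (!d)
		return;
	d->pin = pin;
	d->arg = arg;
	pthread_mutex_lock(&deferred_lock);
	d->next = deferred_head;
	deferred_head = d;
	pthread_mutex_unlock(&deferred_lock);
}

static void drain_deferred(void)
{
	pthread_mutex_lock(&deferred_lock);
	while (deferred_head) {
		struct deferred *d = deferred_head;

		deferred_head = d->next;
		printf("set pin %u to %u\n", d->pin, d->arg);
		free(d);
	}
	pthread_mutex_unlock(&deferred_lock);
}

int main(void)
{
	queue_deferred(4, 1);
	queue_deferred(7, 0);
	drain_deferred();
	return 0;
}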
@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
 
 	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
 
-	return irq_chip_mask_parent(data);
+	irq_chip_mask_parent(data);
 }
 
 static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
 
 	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
 
-	return irq_chip_unmask_parent(data);
+	irq_chip_unmask_parent(data);
 }
 
 static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 
 	ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
 	if (ret)
-		gpiochip_free_own_desc(desc);
+		dev_warn(chip->parent,
+			 "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+			 pin, ret);
 
-	return ret ? ERR_PTR(ret) : desc;
+	return desc;
 }
 
 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
@@ -971,7 +971,6 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
-		svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
 		device_queue_manager_uninit(kfd->dqm);
 		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
Some files were not shown because too many files have changed in this diff.