Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/phy/bcm7xxx.c
  d88fd1b546 ("net: phy: bcm7xxx: Fixed indirect MMD operations")
  f68d08c437 ("net: phy: bcm7xxx: Add EPHY entry for 72165")

net/sched/sch_api.c
  b193e15ac6 ("net: prevent user from passing illegal stab size")
  69508d4333 ("net_sched: Use struct_size() and flex_array_size() helpers")

Both cases trivial - adjacent code additions.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit dd9a887b35
Jakub Kicinski, 2021-09-30 14:49:21 -07:00
453 changed files with 4875 additions and 2675 deletions

@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations. In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
 exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
 
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be

@@ -810,7 +810,7 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
 F: drivers/dma/altera-msgdma.c
 
 ALTERA PIO DRIVER
-M: Joyce Ooi <joyce.ooi@intel.com>
+M: Mun Yew Tham <mun.yew.tham@intel.com>
 L: linux-gpio@vger.kernel.org
 S: Maintained
 F: drivers/gpio/gpio-altera.c
@@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
 S: Maintained
 F: drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M: Evan Quan <evan.quan@amd.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
 T: git https://gitlab.freedesktop.org/agd5f/linux.git
-F: drivers/gpu/drm/amd/pm/powerplay/
+F: drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M: Sanjay R Mehta <sanju.mehta@amd.com>
@@ -2961,7 +2961,7 @@ F: crypto/async_tx/
 F: include/linux/async_tx.h
 
 AT24 EEPROM DRIVER
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
 L: linux-i2c@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -3384,9 +3384,11 @@ F: Documentation/networking/filter.rst
 F: Documentation/userspace-api/ebpf/
 F: arch/*/net/*
 F: include/linux/bpf*
+F: include/linux/btf*
 F: include/linux/filter.h
 F: include/trace/events/xdp.h
 F: include/uapi/linux/bpf*
+F: include/uapi/linux/btf*
 F: include/uapi/linux/filter.h
 F: kernel/bpf/
 F: kernel/trace/bpf_trace.c
@@ -3820,7 +3822,6 @@ F: drivers/scsi/mpi3mr/
 
 BROADCOM NETXTREME-E ROCE DRIVER
 M: Selvin Xavier <selvin.xavier@broadcom.com>
-M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 W: http://www.broadcom.com
@@ -7985,7 +7986,7 @@ F: include/linux/gpio/regmap.h
 
 GPIO SUBSYSTEM
 M: Linus Walleij <linus.walleij@linaro.org>
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
 L: linux-gpio@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ -11366,7 +11367,7 @@ F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
 F: drivers/iio/proximity/mb1232.c
 
 MAXIM MAX77650 PMIC MFD DRIVER
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/*/*max77650.yaml
@@ -16650,13 +16651,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
 S: Supported
 F: drivers/char/pcmcia/scr24x_cs.c
 
-SCSI CDROM DRIVER
-M: Jens Axboe <axboe@kernel.dk>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-W: http://www.kernel.dk
-F: drivers/scsi/sr*
-
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M: Bart Van Assche <bvanassche@acm.org>
 L: linux-rdma@vger.kernel.org
@@ -17890,7 +17884,8 @@ M: Olivier Moysan <olivier.moysan@foss.st.com>
 M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
-F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
 F: sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -18689,7 +18684,7 @@ F: include/linux/clk/ti.h
 
 TI DAVINCI MACHINE SUPPORT
 M: Sekhar Nori <nsekhar@ti.com>
-R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+R: Bartosz Golaszewski <brgl@bgdev.pl>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ -19288,13 +19283,12 @@ S: Maintained
 F: drivers/usb/misc/chaoskey.c
 
 USB CYPRESS C67X00 DRIVER
-M: Peter Korsgaard <jacmet@sunsite.dk>
 L: linux-usb@vger.kernel.org
-S: Maintained
+S: Orphan
 F: drivers/usb/c67x00/
 
 USB DAVICOM DM9601 DRIVER
-M: Peter Korsgaard <jacmet@sunsite.dk>
+M: Peter Korsgaard <peter@korsgaard.com>
 L: netdev@vger.kernel.org
 S: Maintained
 W: http://www.linux-usb.org/usbnet

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*

@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 uprobe_notify_resume(regs);
 } else {
 tracehook_notify_resume(regs);
-rseq_handle_notify_resume(NULL, regs);
 }
 }
 local_irq_disable();

@@ -487,7 +487,6 @@
 interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&qusb_phy_0>, <&usb0_ssphy>;
 phy-names = "usb2-phy", "usb3-phy";
-tx-fifo-resize;
 snps,is-utmi-l1-suspend;
 snps,hird-threshold = /bits/ 8 <0x0>;
 snps,dis_u2_susphy_quirk;
@@ -528,7 +527,6 @@
 interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 phys = <&qusb_phy_1>, <&usb1_ssphy>;
 phy-names = "usb2-phy", "usb3-phy";
-tx-fifo-resize;
 snps,is-utmi-l1-suspend;
 snps,hird-threshold = /bits/ 8 <0x0>;
 snps,dis_u2_susphy_quirk;

@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
 
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap
 
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
 typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HWID

@@ -525,6 +525,11 @@ alternative_endif
 #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
+#endif
+
 /*
  * Emit a 64-bit absolute little endian symbol reference in a way that
  * ensures that it will be resolved at build time, even when building a

@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
 
 static inline void mte_check_tfsr_entry(void)
 {
+if (!system_supports_mte())
+return;
+
 mte_check_tfsr_el1();
 }
 
 static inline void mte_check_tfsr_exit(void)
 {
+if (!system_supports_mte())
+return;
+
 /*
  * The asynchronous faults are sync'ed automatically with
  * TFSR_EL1 on kernel entry but for exit an explicit dsb()

@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
 #define __HAVE_ARCH_STRCHR
 extern char *strchr(const char *, int c);
 
+#ifndef CONFIG_KASAN_HW_TAGS
 #define __HAVE_ARCH_STRCMP
 extern int strcmp(const char *, const char *);
 
 #define __HAVE_ARCH_STRNCMP
 extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
 
 #define __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);

@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
-acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
 efi_memory_desc_t *md, *region = NULL;
 pgprot_t prot;
@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
 * It is fine for AML to remap regions that are not represented in the
 * EFI memory map at all, as it only describes normal memory, and MMIO
 * regions that require a virtual mapping to make them accessible to
-* the EFI runtime services. Determine the region default
-* attributes by checking the requested memory semantics.
+* the EFI runtime services.
 */
-prot = memory ? __pgprot(PROT_NORMAL_NC) :
-__pgprot(PROT_DEVICE_nGnRnE);
+prot = __pgprot(PROT_DEVICE_nGnRnE);
 if (region) {
 switch (region->type) {
 case EFI_LOADER_CODE:
@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
 return __ioremap(phys, size, prot);
 }
 
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
-return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
-return __acpi_os_ioremap(phys, size, true);
-}
-
 /*
  * Claim Synchronous External Aborts as a firmware first notification.
  *

@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 /*
  * For reasons that aren't entirely clear, enabling KPTI on Cavium
  * ThunderX leads to apparent I-cache corruption of kernel text, which
- * ends as well as you might imagine. Don't even try.
+ * ends as well as you might imagine. Don't even try. We cannot rely
+ * on the cpus_have_*cap() helpers here to detect the CPU erratum
+ * because cpucap detection order may change. However, since we know
+ * affected CPUs are always in a homogeneous configuration, it is
+ * safe to rely on this_cpu_has_cap() here.
  */
-if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
 str = "ARM64_WORKAROUND_CAVIUM_27456";
 __kpti_forced = -1;
 }

@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-u64 tfsr_el1;
-
-if (!system_supports_mte())
-return;
-
-tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
 
 if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
 /*
@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
 
 void mte_thread_switch(struct task_struct *next)
 {
+if (!system_supports_mte())
+return;
+
 mte_update_sctlr_user(next);
 
 /*

@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 do_signal(regs);
 
-if (thread_flags & _TIF_NOTIFY_RESUME) {
+if (thread_flags & _TIF_NOTIFY_RESUME)
 tracehook_notify_resume(regs);
-rseq_handle_notify_resume(NULL, regs);
-}
 
 if (thread_flags & _TIF_FOREIGN_FPSTATE)
 fpsimd_restore_current_state();

@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
 # runtime. Because the hypervisor is part of the kernel binary, relocations
 # produce a kernel VA. We enumerate relocations targeting hyp at build time
 # and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
 	$(call if_changed,hyprel)
 
 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.

@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-static_branch_enable(&kvm_arm_pmu_available);
-
 return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }

@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+!kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
 struct perf_event_attr attr = { };
 struct perf_event *event;

@@ -173,4 +173,4 @@ L(done):
 ret
 
 SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)

@@ -258,4 +258,4 @@ L(ret0):
 ret
 
 SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)

@@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 do_signal(regs);
 
-if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+if (thread_info_flags & _TIF_NOTIFY_RESUME)
 tracehook_notify_resume(regs);
-rseq_handle_notify_resume(NULL, regs);
-}
 }

@@ -15,7 +15,6 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/traps.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -25,7 +24,6 @@
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl bad_interrupt
 .globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
 subql #4,%sp /* dummy return address */
 SAVE_SWITCH_STACK
 jbsr syscall_trace_leave
-
-ret_from_signal:
 RESTORE_SWITCH_STACK
 addql #4,%sp
 jra ret_from_exception

@@ -29,7 +29,6 @@ config M68K
 select NO_DMA if !MMU && !COLDFIRE
 select OLD_SIGACTION
 select OLD_SIGSUSPEND3
-select SET_FS
 select UACCESS_MEMCPY if !MMU
 select VIRT_TO_BUS
 select ZONE_DMA

@@ -31,7 +31,6 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/setup.h>
-#include <asm/segment.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
@@ -51,7 +50,6 @@ sw_usp:
 .globl system_call
 .globl resume
 .globl ret_from_exception
-.globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
@@ -98,8 +96,6 @@ ENTRY(system_call)
 subql #4,%sp /* dummy return address */
 SAVE_SWITCH_STACK
 jbsr syscall_trace_leave
-
-ret_from_signal:
 RESTORE_SWITCH_STACK
 addql #4,%sp

@@ -9,7 +9,6 @@
 #define __ASM_M68K_PROCESSOR_H
 
 #include <linux/thread_info.h>
-#include <asm/segment.h>
 #include <asm/fpu.h>
 #include <asm/ptrace.h>
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
 #define TASK_UNMAPPED_BASE 0
 #endif
 
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA 1
+#define USER_PROGRAM 2
+#define SUPER_DATA 5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE 7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations. For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+WARN_ON_ONCE(in_interrupt());
+
+__asm__ __volatile__ ("movec %0,%/sfc\n\t"
+"movec %0,%/dfc\n\t"
+: /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
 struct thread_struct {
 unsigned long ksp; /* kernel stack pointer */
 unsigned long usp; /* user stack pointer */
 unsigned short sr; /* saved status register */
-unsigned short fs; /* saved fs (sfc, dfc) */
+unsigned short fc; /* saved fc (sfc, dfc) */
 unsigned long crp[2]; /* cpu root pointer */
 unsigned long esp0; /* points to SR of stack frame */
 unsigned long faddr; /* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
 #define INIT_THREAD { \
 .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
 .sr = PS_S, \
-.fs = __KERNEL_DS, \
+.fc = USER_DATA, \
 }
 
 /*


@ -1,59 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_SEGMENT_H
#define _M68K_SEGMENT_H
/* define constants */
/* Address spaces (FC0-FC2) */
#define USER_DATA (1)
#ifndef __USER_DS
#define __USER_DS (USER_DATA)
#endif
#define USER_PROGRAM (2)
#define SUPER_DATA (5)
#ifndef __KERNEL_DS
#define __KERNEL_DS (SUPER_DATA)
#endif
#define SUPER_PROGRAM (6)
#define CPU_SPACE (7)
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
/*
* Get/set the SFC/DFC registers for MOVES instructions
*/
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
static inline mm_segment_t get_fs(void)
{
mm_segment_t _v;
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
return _v;
}
static inline void set_fs(mm_segment_t val)
{
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
"movec %0,%/dfc\n\t"
: /* no outputs */ : "r" (val.seg) : "memory");
}
#else
#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#endif
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif /* __ASSEMBLY__ */
#endif /* _M68K_SEGMENT_H */

@@ -4,7 +4,6 @@
 
 #include <asm/types.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
 struct thread_info {
 struct task_struct *task; /* main task structure */
 unsigned long flags;
-mm_segment_t addr_limit; /* thread address space */
 int preempt_count; /* 0 => preemptable, <0 => BUG */
 __u32 cpu; /* should always be 0 on m68k */
 unsigned long tp_value; /* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk) \
 { \
 .task = &tsk, \
-.addr_limit = KERNEL_DS, \
 .preempt_count = INIT_PREEMPT_COUNT, \
 }

@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
 if (CPU_IS_COLDFIRE) {
 mmu_write(MMUOR, MMUOR_CNL);
 } else if (CPU_IS_040_OR_060) {
-mm_segment_t old_fs = get_fs();
-set_fs(KERNEL_DS);
+set_fc(SUPER_DATA);
 __asm__ __volatile__(".chip 68040\n\t"
 "pflush (%0)\n\t"
 ".chip 68k"
 : : "a" (addr));
-set_fs(old_fs);
+set_fc(USER_DATA);
 } else if (CPU_IS_020_OR_030)
 __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
 }
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-if (vma->vm_mm == current->active_mm) {
-mm_segment_t old_fs = force_uaccess_begin();
-
+if (vma->vm_mm == current->active_mm)
 __flush_tlb_one(addr);
-force_uaccess_end(old_fs);
-}
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,

@@ -267,6 +267,10 @@ struct frame {
 } un;
 };
 
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _M68K_TRAPS_H */


@ -9,13 +9,16 @@
*/ */
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/segment.h>
#include <asm/extable.h> #include <asm/extable.h>
/* We let the MMU do all checking */ /* We let the MMU do all checking */
static inline int access_ok(const void __user *addr, static inline int access_ok(const void __user *addr,
unsigned long size) unsigned long size)
{ {
/*
* XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
* for TASK_SIZE!
*/
return 1; return 1;
} }
@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
#define MOVES "move" #define MOVES "move"
#endif #endif
extern int __put_user_bad(void); #define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
extern int __get_user_bad(void);
#define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \ asm volatile ("\n" \
"1: "MOVES"."#bwl" %2,%1\n" \ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \ "2:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
@ -56,6 +56,31 @@ asm volatile ("\n" \
: "+d" (res), "=m" (*(ptr)) \ : "+d" (res), "=m" (*(ptr)) \
: #reg (x), "i" (err)) : #reg (x), "i" (err))
#define __put_user_asm8(inst, res, x, ptr) \
do { \
const void *__pu_ptr = (const void __force *)(ptr); \
\
asm volatile ("\n" \
"1: "inst".l %2,(%1)+\n" \
"2: "inst".l %R2,(%1)\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
"10: movel %3,%0\n" \
" jra 3b\n" \
" .previous\n" \
"\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10b\n" \
" .long 2b,10b\n" \
" .long 3b,10b\n" \
" .previous" \
: "+d" (res), "+a" (__pu_ptr) \
: "r" (x), "i" (-EFAULT) \
: "memory"); \
} while (0)
/* /*
* These are the main single-value transfer routines. They automatically * These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type. * use the right size if we just have the right pointer type.
@ -68,51 +93,29 @@ asm volatile ("\n" \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \ switch (sizeof (*(ptr))) { \
case 1: \ case 1: \
__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
break; \ break; \
case 2: \ case 2: \
__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
break; \ break; \
case 4: \ case 4: \
__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
break; \ break; \
case 8: \ case 8: \
{ \ __put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
const void __user *__pu_ptr = (ptr); \
asm volatile ("\n" \
"1: "MOVES".l %2,(%1)+\n" \
"2: "MOVES".l %R2,(%1)\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
"10: movel %3,%0\n" \
" jra 3b\n" \
" .previous\n" \
"\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10b\n" \
" .long 2b,10b\n" \
" .long 3b,10b\n" \
" .previous" \
: "+d" (__pu_err), "+a" (__pu_ptr) \
: "r" (__pu_val), "i" (-EFAULT) \
: "memory"); \
break; \ break; \
} \
default: \ default: \
__pu_err = __put_user_bad(); \ BUILD_BUG(); \
break; \
} \ } \
__pu_err; \ __pu_err; \
}) })
#define put_user(x, ptr) __put_user(x, ptr) #define put_user(x, ptr) __put_user(x, ptr)
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ #define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
type __gu_val; \ type __gu_val; \
asm volatile ("\n" \ asm volatile ("\n" \
"1: "MOVES"."#bwl" %2,%1\n" \ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \ "2:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
@ -130,53 +133,57 @@ asm volatile ("\n" \
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
}) })
#define __get_user_asm8(inst, res, x, ptr) \
do { \
const void *__gu_ptr = (const void __force *)(ptr); \
union { \
u64 l; \
__typeof__(*(ptr)) t; \
} __gu_val; \
\
asm volatile ("\n" \
"1: "inst".l (%2)+,%1\n" \
"2: "inst".l (%2),%R1\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
"10: move.l %3,%0\n" \
" sub.l %1,%1\n" \
" sub.l %R1,%R1\n" \
" jra 3b\n" \
" .previous\n" \
"\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10b\n" \
" .long 2b,10b\n" \
" .previous" \
: "+d" (res), "=&r" (__gu_val.l), \
"+a" (__gu_ptr) \
: "i" (-EFAULT) \
: "memory"); \
(x) = __gu_val.t; \
} while (0)
#define __get_user(x, ptr) \ #define __get_user(x, ptr) \
({ \ ({ \
int __gu_err = 0; \ int __gu_err = 0; \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: \ case 1: \
__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
break; \ break; \
case 2: \ case 2: \
__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \ __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
break; \ break; \
case 4: \ case 4: \
__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
break; \ break; \
case 8: { \ case 8: \
const void __user *__gu_ptr = (ptr); \ __get_user_asm8(MOVES, __gu_err, x, ptr); \
union { \
u64 l; \
__typeof__(*(ptr)) t; \
} __gu_val; \
asm volatile ("\n" \
"1: "MOVES".l (%2)+,%1\n" \
"2: "MOVES".l (%2),%R1\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
"10: move.l %3,%0\n" \
" sub.l %1,%1\n" \
" sub.l %R1,%R1\n" \
" jra 3b\n" \
" .previous\n" \
"\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10b\n" \
" .long 2b,10b\n" \
" .previous" \
: "+d" (__gu_err), "=&r" (__gu_val.l), \
"+a" (__gu_ptr) \
: "i" (-EFAULT) \
: "memory"); \
(x) = __gu_val.t; \
break; \ break; \
} \
default: \ default: \
__gu_err = __get_user_bad(); \ BUILD_BUG(); \
break; \
} \ } \
__gu_err; \ __gu_err; \
}) })
@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
switch (n) { switch (n) {
case 1: case 1:
__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
b, d, 1);
break; break;
case 2: case 2:
__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2); __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
w, r, 2);
break; break;
case 3: case 3:
__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
break; break;
case 4: case 4:
__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
l, r, 4);
break; break;
case 5: case 5:
__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_FROM_USER #define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER #define INLINE_COPY_TO_USER
#define user_addr_max() \ #define HAVE_GET_KERNEL_NOFAULT
(uaccess_kernel() ? ~0UL : TASK_SIZE)
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
type *__gk_dst = (type *)(dst); \
type *__gk_src = (type *)(src); \
int __gk_err = 0; \
\
switch (sizeof(type)) { \
case 1: \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u8, b, d, -EFAULT); \
break; \
case 2: \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u16, w, r, -EFAULT); \
break; \
case 4: \
__get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
u32, l, r, -EFAULT); \
break; \
case 8: \
__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
break; \
default: \
BUILD_BUG(); \
} \
if (unlikely(__gk_err)) \
goto err_label; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
type __pk_src = *(type *)(src); \
type *__pk_dst = (type *)(dst); \
int __pk_err = 0; \
\
switch (sizeof(type)) { \
case 1: \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
b, d, -EFAULT); \
break; \
case 2: \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
w, r, -EFAULT); \
break; \
case 4: \
__put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
l, r, -EFAULT); \
break; \
case 8: \
__put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
break; \
default: \
BUILD_BUG(); \
} \
if (unlikely(__pk_err)) \
goto err_label; \
} while (0)
extern long strncpy_from_user(char *dst, const char __user *src, long count); extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n); extern __must_check long strnlen_user(const char __user *str, long n);

@@ -31,7 +31,7 @@ int main(void)
 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
 DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
 DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
-DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
 DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
 DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
 DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));


@ -36,7 +36,6 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
ENTRY(sys_sigreturn) ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK SAVE_SWITCH_STACK
movel %sp,%sp@- | switch_stack pointer movel %sp,%a1 | switch_stack pointer
pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
jbsr do_sigreturn jbsr do_sigreturn
addql #8,%sp jra 1f | shared with rt_sigreturn()
RESTORE_SWITCH_STACK
rts
ENTRY(sys_rt_sigreturn) ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK SAVE_SWITCH_STACK
movel %sp,%sp@- | switch_stack pointer movel %sp,%a1 | switch_stack pointer
pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
| stack contents:
| [original pt_regs address] [original switch_stack address]
| [gap] [switch_stack] [pt_regs] [exception frame]
jbsr do_rt_sigreturn jbsr do_rt_sigreturn
addql #8,%sp
1:
| stack contents now:
| [original pt_regs address] [original switch_stack address]
| [unused part of the gap] [moved switch_stack] [moved pt_regs]
| [replacement exception frame]
| return value of do_{rt_,}sigreturn() points to moved switch_stack.
movel %d0,%sp | discard the leftover junk
RESTORE_SWITCH_STACK RESTORE_SWITCH_STACK
| stack contents now is just [syscall return address] [pt_regs] [frame]
| return pt_regs.d0
movel %sp@(PT_OFF_D0+4),%d0
rts rts
ENTRY(buserr) ENTRY(buserr)
@ -182,25 +199,6 @@ do_trace_exit:
addql #4,%sp addql #4,%sp
jra .Lret_from_exception jra .Lret_from_exception
ENTRY(ret_from_signal)
movel %curptr@(TASK_STACK),%a1
tstb %a1@(TINFO_FLAGS+2)
jge 1f
jbsr syscall_trace
1: RESTORE_SWITCH_STACK
addql #4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
subql #7,%d0 | bus error frame ?
jbne 1f
movel %sp,%sp@-
jbsr berr_040cleanup
addql #4,%sp
1:
#endif
jra .Lret_from_exception
ENTRY(system_call) ENTRY(system_call)
SAVE_ALL_SYS SAVE_ALL_SYS
@ -338,7 +336,7 @@ resume:
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */ /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0 movec %sfc,%d0
movew %d0,%a0@(TASK_THREAD+THREAD_FS) movew %d0,%a0@(TASK_THREAD+THREAD_FC)
/* save usp */ /* save usp */
/* it is better to use a movel here instead of a movew 8*) */ /* it is better to use a movel here instead of a movew 8*) */
@ -424,7 +422,7 @@ resume:
movel %a0,%usp movel %a0,%usp
/* restore fs (sfc,%dfc) */ /* restore fs (sfc,%dfc) */
movew %a1@(TASK_THREAD+THREAD_FS),%a0 movew %a1@(TASK_THREAD+THREAD_FC),%a0
movec %a0,%sfc movec %a0,%sfc
movec %a0,%dfc movec %a0,%dfc

@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
 
 void flush_thread(void)
 {
-current->thread.fs = __USER_DS;
+current->thread.fc = USER_DATA;
 #ifdef CONFIG_FPU
 if (!FPU_IS_EMU) {
 unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
 * Must save the current SFC/DFC value, NOT the value when
 * the parent was last descheduled - RGH 10-08-96
 */
-p->thread.fs = get_fs().seg;
+p->thread.fc = USER_DATA;
 
 if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 /* kernel thread */


@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0]; fpu_version = sc->sc_fpstate[0];
if (CPU_IS_020_OR_030 && if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) && regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) { regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */ /* Clear pending exception in 68882 idle frame */
@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1]; context_size = fpstate[1];
fpu_version = fpstate[0]; fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 && if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) && regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) { regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */ /* Clear pending exception in 68882 idle frame */
@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp) void __user *fp)
{ {
int fsize = frame_extra_sizes(formatvec >> 12); int extra = frame_extra_sizes(formatvec >> 12);
if (fsize < 0) { char buf[sizeof_field(struct frame, un)];
if (extra < 0) {
/* /*
* user process trying to return with weird frame format * user process trying to return with weird frame format
*/ */
pr_debug("user process returning with weird frame format\n"); pr_debug("user process returning with weird frame format\n");
return 1; return -1;
} }
if (!fsize) { if (extra && copy_from_user(buf, fp, extra))
regs->format = formatvec >> 12; return -1;
regs->vector = formatvec & 0xfff; regs->format = formatvec >> 12;
} else { regs->vector = formatvec & 0xfff;
struct switch_stack *sw = (struct switch_stack *)regs - 1; if (extra) {
/* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */ void *p = (struct switch_stack *)regs - 1;
unsigned long buf[sizeof_field(struct frame, un) / 2]; struct frame *new = (void *)regs - extra;
int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
/* that'll make sure that expansion won't crap over data */ memmove(p - extra, p, size);
if (copy_from_user(buf + fsize / 4, fp, fsize)) memcpy(p - extra + size, buf, extra);
return 1; current->thread.esp0 = (unsigned long)&new->ptregs;
#ifdef CONFIG_M68040
/* point of no return */ /* on 68040 complete pending writebacks if any */
regs->format = formatvec >> 12; if (new->ptregs.format == 7) // bus error frame
regs->vector = formatvec & 0xfff; berr_040cleanup(new);
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
__asm__ __volatile__ (
#ifdef CONFIG_COLDFIRE
" movel %0,%/sp\n\t"
" bra ret_from_signal\n"
#else
" movel %0,%/a0\n\t"
" subl %1,%/a0\n\t" /* make room on stack */
" movel %/a0,%/sp\n\t" /* set stack pointer */
/* move switch_stack and pt_regs */
"1: movel %0@+,%/a0@+\n\t"
" dbra %2,1b\n\t"
" lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
" lsrl #2,%1\n\t"
" subql #1,%1\n\t"
/* copy to the gap we'd made */
"2: movel %4@+,%/a0@+\n\t"
" dbra %1,2b\n\t"
" bral ret_from_signal\n"
#endif #endif
: /* no outputs, it doesn't ever return */
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
"n" (frame_offset), "a" (buf + fsize/4)
: "a0");
#undef frame_offset
} }
return 0; return extra;
} }
static inline int static inline int
@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
{ {
int formatvec; int formatvec;
struct sigcontext context; struct sigcontext context;
int err = 0;
siginfo_build_tests(); siginfo_build_tests();
@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
/* get previous context */ /* get previous context */
if (copy_from_user(&context, usc, sizeof(context))) if (copy_from_user(&context, usc, sizeof(context)))
goto badframe; return -1;
/* restore passed registers */ /* restore passed registers */
regs->d0 = context.sc_d0; regs->d0 = context.sc_d0;
@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
wrusp(context.sc_usp); wrusp(context.sc_usp);
formatvec = context.sc_formatvec; formatvec = context.sc_formatvec;
err = restore_fpu_state(&context); if (restore_fpu_state(&context))
return -1;
if (err || mangle_kernel_stack(regs, formatvec, fp)) return mangle_kernel_stack(regs, formatvec, fp);
goto badframe;
return 0;
badframe:
return 1;
} }
static inline int static inline int
@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err = __get_user(temp, &uc->uc_mcontext.version); err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION) if (temp != MCONTEXT_VERSION)
goto badframe; return -1;
/* restore passed registers */ /* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]); err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]); err |= __get_user(regs->d1, &gregs[1]);
@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err |= restore_altstack(&uc->uc_stack); err |= restore_altstack(&uc->uc_stack);
if (err) if (err)
goto badframe; return -1;
if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) return mangle_kernel_stack(regs, temp, &uc->uc_extra);
goto badframe;
return 0;
badframe:
return 1;
} }
asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{ {
unsigned long usp = rdusp(); unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set; sigset_t set;
int size;
if (!access_ok(frame, sizeof(*frame))) if (!access_ok(frame, sizeof(*frame)))
goto badframe; goto badframe;
@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set); set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, frame + 1)) size = restore_sigcontext(regs, &frame->sc, frame + 1);
if (size < 0)
goto badframe; goto badframe;
return regs->d0; return (void *)sw - size;
badframe: badframe:
force_sig(SIGSEGV); force_sig(SIGSEGV);
return 0; return sw;
} }
asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{ {
unsigned long usp = rdusp(); unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set; sigset_t set;
int size;
if (!access_ok(frame, sizeof(*frame))) if (!access_ok(frame, sizeof(*frame)))
goto badframe; goto badframe;
@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set); set_current_blocked(&set);
if (rt_restore_ucontext(regs, sw, &frame->uc)) size = rt_restore_ucontext(regs, sw, &frame->uc);
if (size < 0)
goto badframe; goto badframe;
return regs->d0; return (void *)sw - size;
badframe: badframe:
force_sig(SIGSEGV); force_sig(SIGSEGV);
return 0; return sw;
}
static inline struct pt_regs *rte_regs(struct pt_regs *regs)
{
return (void *)regs + regs->stkadj;
} }
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
unsigned long mask) unsigned long mask)
{ {
struct pt_regs *tregs = rte_regs(regs);
sc->sc_mask = mask; sc->sc_mask = mask;
sc->sc_usp = rdusp(); sc->sc_usp = rdusp();
sc->sc_d0 = regs->d0; sc->sc_d0 = regs->d0;
sc->sc_d1 = regs->d1; sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0; sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1; sc->sc_a1 = regs->a1;
sc->sc_sr = regs->sr; sc->sc_sr = tregs->sr;
sc->sc_pc = regs->pc; sc->sc_pc = tregs->pc;
sc->sc_formatvec = regs->format << 12 | regs->vector; sc->sc_formatvec = tregs->format << 12 | tregs->vector;
save_a5_state(sc, regs); save_a5_state(sc, regs);
save_fpu_state(sc, regs); save_fpu_state(sc, regs);
} }
@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{ {
struct switch_stack *sw = (struct switch_stack *)regs - 1; struct switch_stack *sw = (struct switch_stack *)regs - 1;
struct pt_regs *tregs = rte_regs(regs);
greg_t __user *gregs = uc->uc_mcontext.gregs; greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0; int err = 0;
@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
err |= __put_user(sw->a5, &gregs[13]); err |= __put_user(sw->a5, &gregs[13]);
err |= __put_user(sw->a6, &gregs[14]); err |= __put_user(sw->a6, &gregs[14]);
err |= __put_user(rdusp(), &gregs[15]); err |= __put_user(rdusp(), &gregs[15]);
err |= __put_user(regs->pc, &gregs[16]); err |= __put_user(tregs->pc, &gregs[16]);
err |= __put_user(regs->sr, &gregs[17]); err |= __put_user(tregs->sr, &gregs[17]);
err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
err |= rt_save_fpu_state(uc, regs); err |= rt_save_fpu_state(uc, regs);
return err; return err;
} }
@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct sigframe __user *frame; struct sigframe __user *frame;
int fsize = frame_extra_sizes(regs->format); struct pt_regs *tregs = rte_regs(regs);
int fsize = frame_extra_sizes(tregs->format);
struct sigcontext context; struct sigcontext context;
int err = 0, sig = ksig->sig; int err = 0, sig = ksig->sig;
if (fsize < 0) { if (fsize < 0) {
pr_debug("setup_frame: Unknown frame format %#x\n", pr_debug("setup_frame: Unknown frame format %#x\n",
regs->format); tregs->format);
return -EFAULT; return -EFAULT;
} }
@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
err |= __put_user(sig, &frame->sig); err |= __put_user(sig, &frame->sig);
err |= __put_user(regs->vector, &frame->code); err |= __put_user(tregs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc); err |= __put_user(&frame->sc, &frame->psc);
if (_NSIG_WORDS > 1) if (_NSIG_WORDS > 1)
@ -933,34 +912,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode); push_cache ((unsigned long) &frame->retcode);
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
/* /*
* This is subtle; if we build more than one sigframe, all but the * This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't * first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj. * screw stkadj.
*/ */
if (fsize) if (fsize) {
regs->stkadj = fsize; regs->stkadj = fsize;
tregs = rte_regs(regs);
/* Prepare to skip over the extra stuff in the exception frame. */
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
/* This must be copied with decreasing addresses to
handle overlaps. */
tregs->vector = 0; tregs->vector = 0;
tregs->format = 0; tregs->format = 0;
tregs->pc = regs->pc;
tregs->sr = regs->sr; tregs->sr = regs->sr;
} }
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
return 0; return 0;
} }
@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct rt_sigframe __user *frame; struct rt_sigframe __user *frame;
int fsize = frame_extra_sizes(regs->format); struct pt_regs *tregs = rte_regs(regs);
int fsize = frame_extra_sizes(tregs->format);
int err = 0, sig = ksig->sig; int err = 0, sig = ksig->sig;
if (fsize < 0) { if (fsize < 0) {
@ -1018,34 +992,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode); push_cache ((unsigned long) &frame->retcode);
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
/* /*
* This is subtle; if we build more than one sigframe, all but the * This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't * first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj. * screw stkadj.
*/ */
if (fsize) if (fsize) {
regs->stkadj = fsize; regs->stkadj = fsize;
tregs = rte_regs(regs);
/* Prepare to skip over the extra stuff in the exception frame. */
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
/* This must be copied with decreasing addresses to
handle overlaps. */
tregs->vector = 0; tregs->vector = 0;
tregs->format = 0; tregs->format = 0;
tregs->pc = regs->pc;
tregs->sr = regs->sr; tregs->sr = regs->sr;
} }
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
return 0; return 0;
} }

@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 {
 unsigned long mmusr;
-mm_segment_t old_fs = get_fs();
 
-set_fs(MAKE_MM_SEG(wbs));
+set_fc(wbs);
 
 if (iswrite)
 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
 
-set_fs(old_fs);
+set_fc(USER_DATA);
 
 return mmusr;
 }
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
 unsigned long wbd)
 {
 int res = 0;
-mm_segment_t old_fs = get_fs();
 
-/* set_fs can not be moved, otherwise put_user() may oops */
-set_fs(MAKE_MM_SEG(wbs));
+set_fc(wbs);
 
 switch (wbs & WBSIZ_040) {
 case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
 break;
 }
 
-/* set_fs can not be moved, otherwise put_user() may oops */
-set_fs(old_fs);
+set_fc(USER_DATA);
 
 pr_debug("do_040writeback1, res=%d\n", res);

@@ -18,7 +18,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/io.h>
-#include <asm/segment.h>
 #include <asm/setup.h>
 #include <asm/macintosh.h>
 #include <asm/mac_via.h>

@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 if (mmusr & MMU_R_040)
 return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
 } else {
-unsigned short mmusr;
-unsigned long *descaddr;
-
-asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-"pmove %%psr,%1"
-: "=a&" (descaddr), "=m" (mmusr)
-: "a" (vaddr), "d" (get_fs().seg));
-if (mmusr & (MMU_I|MMU_B|MMU_L))
-return 0;
-descaddr = phys_to_virt((unsigned long)descaddr);
-switch (mmusr & MMU_NUM) {
-case 1:
-return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-case 2:
-return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-case 3:
-return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-}
+WARN_ON_ONCE(!CPU_IS_040_OR_060);
 }
 return 0;
 }
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
 
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-mm_segment_t old_fs = get_fs();
-
-set_fs(KERNEL_DS);
+set_fc(SUPER_DATA);
 flush_icache_user_range(address, endaddr);
-set_fs(old_fs);
+set_fc(USER_DATA);
 }
 EXPORT_SYMBOL(flush_icache_range);


@ -72,12 +72,6 @@ void __init paging_init(void)
if (!empty_zero_page) if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n", panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE); __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
*/
set_fs (USER_DS);
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT; max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
free_area_init(max_zone_pfn); free_area_init(max_zone_pfn);
} }


@ -17,7 +17,6 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>


@ -15,7 +15,6 @@
#include <linux/gfp.h> #include <linux/gfp.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/machdep.h> #include <asm/machdep.h>


@ -467,7 +467,7 @@ void __init paging_init(void)
/* /*
* Set up SFC/DFC registers * Set up SFC/DFC registers
*/ */
set_fs(KERNEL_DS); set_fc(USER_DATA);
#ifdef DEBUG #ifdef DEBUG
printk ("before free_area_init\n"); printk ("before free_area_init\n");


@ -31,7 +31,6 @@
#include <asm/intersil.h> #include <asm/intersil.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/segment.h>
#include <asm/sun3ints.h> #include <asm/sun3ints.h>
char sun3_reserved_pmeg[SUN3_PMEGS_NUM]; char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@ -89,7 +88,7 @@ void __init sun3_init(void)
sun3_reserved_pmeg[249] = 1; sun3_reserved_pmeg[249] = 1;
sun3_reserved_pmeg[252] = 1; sun3_reserved_pmeg[252] = 1;
sun3_reserved_pmeg[253] = 1; sun3_reserved_pmeg[253] = 1;
set_fs(KERNEL_DS); set_fc(USER_DATA);
} }
/* Without this, Bad Things happen when something calls arch_reset. */ /* Without this, Bad Things happen when something calls arch_reset. */


@ -23,7 +23,6 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/sun3mmu.h> #include <asm/sun3mmu.h>
#include <asm/segment.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/dvma.h> #include <asm/dvma.h>
@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE) for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
sun3_put_segmap(seg, SUN3_INVALID_PMEG); sun3_put_segmap(seg, SUN3_INVALID_PMEG);
set_fs(MAKE_MM_SEG(3)); set_fc(3);
for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) { for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
i = sun3_get_segmap(seg); i = sun3_get_segmap(seg);
for(j = 1; j < CONTEXTS_NUM; j++) for(j = 1; j < CONTEXTS_NUM; j++)
(*(romvec->pv_setctxt))(j, (void *)seg, i); (*(romvec->pv_setctxt))(j, (void *)seg, i);
} }
set_fs(KERNEL_DS); set_fc(USER_DATA);
} }
/* erase the mappings for a dead context. Uses the pg_dir for hints /* erase the mappings for a dead context. Uses the pg_dir for hints


@ -11,7 +11,6 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/segment.h>
#include <asm/intersil.h> #include <asm/intersil.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/sun3ints.h> #include <asm/sun3ints.h>


@ -14,7 +14,6 @@
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/sun3xprom.h> #include <asm/sun3xprom.h>
#include <asm/idprom.h> #include <asm/idprom.h>
#include <asm/segment.h>
#include <asm/sun3ints.h> #include <asm/sun3ints.h>
#include <asm/openprom.h> #include <asm/openprom.h>
#include <asm/machines.h> #include <asm/machines.h>


@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs); do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) { if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs); tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
user_enter(); user_enter();
} }


@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
func##_positive) func##_positive)
static bool is_bad_offset(int b_off)
{
return b_off > 0x1ffff || b_off < -0x20000;
}
static int build_body(struct jit_ctx *ctx) static int build_body(struct jit_ctx *ctx)
{ {
const struct bpf_prog *prog = ctx->skf; const struct bpf_prog *prog = ctx->skf;
@ -728,7 +733,10 @@ load_common:
/* Load return register on DS for failures */ /* Load return register on DS for failures */
emit_reg_move(r_ret, r_zero, ctx); emit_reg_move(r_ret, r_zero, ctx);
/* Return with error */ /* Return with error */
emit_b(b_imm(prog->len, ctx), ctx); b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
emit_nop(ctx); emit_nop(ctx);
break; break;
case BPF_LD | BPF_W | BPF_IND: case BPF_LD | BPF_W | BPF_IND:
@ -775,8 +783,10 @@ load_ind:
emit_jalr(MIPS_R_RA, r_s0, ctx); emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */ /* Check the error value */
emit_bcond(MIPS_COND_NE, r_ret, 0, b_off = b_imm(prog->len, ctx);
b_imm(prog->len, ctx), ctx); if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx); emit_reg_move(r_ret, r_zero, ctx);
/* We are good */ /* We are good */
/* X <- P[1:K] & 0xf */ /* X <- P[1:K] & 0xf */
@ -855,8 +865,10 @@ load_ind:
/* A /= X */ /* A /= X */
ctx->flags |= SEEN_X | SEEN_A; ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */ /* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off = b_imm(prog->len, ctx);
b_imm(prog->len, ctx), ctx); if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */ emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx); emit_div(r_A, r_X, ctx);
break; break;
@ -864,8 +876,10 @@ load_ind:
/* A %= X */ /* A %= X */
ctx->flags |= SEEN_X | SEEN_A; ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */ /* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off = b_imm(prog->len, ctx);
b_imm(prog->len, ctx), ctx); if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */ emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx); emit_mod(r_A, r_X, ctx);
break; break;
@ -926,7 +940,10 @@ load_ind:
break; break;
case BPF_JMP | BPF_JA: case BPF_JMP | BPF_JA:
/* pc += K */ /* pc += K */
emit_b(b_imm(i + k + 1, ctx), ctx); b_off = b_imm(i + k + 1, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
emit_nop(ctx); emit_nop(ctx);
break; break;
case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K:
@ -1056,12 +1073,16 @@ jmp_cmp:
break; break;
case BPF_RET | BPF_A: case BPF_RET | BPF_A:
ctx->flags |= SEEN_A; ctx->flags |= SEEN_A;
if (i != prog->len - 1) if (i != prog->len - 1) {
/* /*
* If this is not the last instruction * If this is not the last instruction
* then jump to the epilogue * then jump to the epilogue
*/ */
emit_b(b_imm(prog->len, ctx), ctx); b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
}
emit_reg_move(r_ret, r_A, ctx); /* delay slot */ emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break; break;
case BPF_RET | BPF_K: case BPF_RET | BPF_K:
@ -1075,7 +1096,10 @@ jmp_cmp:
* If this is not the last instruction * If this is not the last instruction
* then jump to the epilogue * then jump to the epilogue
*/ */
emit_b(b_imm(prog->len, ctx), ctx); b_off = b_imm(prog->len, ctx);
if (is_bad_offset(b_off))
return -E2BIG;
emit_b(b_off, ctx);
emit_nop(ctx); emit_nop(ctx);
} }
break; break;
@ -1133,8 +1157,10 @@ jmp_cmp:
/* Load *dev pointer */ /* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx); emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */ /* error (0) in the delay slot */
emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off = b_imm(prog->len, ctx);
b_imm(prog->len, ctx), ctx); if (is_bad_offset(b_off))
return -E2BIG;
emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx); emit_reg_move(r_ret, r_zero, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) { if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
/* Generate the actual JIT code */ /* Generate the actual JIT code */
build_prologue(&ctx); build_prologue(&ctx);
build_body(&ctx); if (build_body(&ctx)) {
module_memfree(ctx.target);
goto out;
}
build_epilogue(&ctx); build_epilogue(&ctx);
/* Update the icache */ /* Update the icache */
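The new is_bad_offset() guard exists because MIPS branch instructions encode a 16-bit signed word offset, so a PC-relative displacement only reaches roughly -0x20000..0x1ffff bytes; anything farther cannot be encoded, and the cBPF JIT now refuses with -E2BIG instead of emitting a truncated offset. Every branch site above follows the same shape (sketch condensed from the hunks, not new code):

        /* Compute the displacement first, refuse to JIT if it does not
         * fit in the 18-bit signed branch range, then emit the branch. */
        b_off = b_imm(prog->len, ctx);
        if (is_bad_offset(b_off))
                return -E2BIG;
        emit_b(b_off, ctx);
        emit_nop(ctx);          /* branch delay slot */

When build_body() fails, bpf_jit_compile() frees the half-built image and the program stays on the interpreter.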


@ -3,9 +3,10 @@
config EARLY_PRINTK config EARLY_PRINTK
bool "Activate early kernel debugging" bool "Activate early kernel debugging"
default y default y
depends on TTY
select SERIAL_CORE_CONSOLE select SERIAL_CORE_CONSOLE
help help
Enable early printk on console Enable early printk on console.
This is useful for kernel debugging when your machine crashes very This is useful for kernel debugging when your machine crashes very
early before the console code is initialized. early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash. You should normally say N here, unless you want to debug such a crash.


@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
void __init setup_arch(char **cmdline_p) void __init setup_arch(char **cmdline_p)
{ {
int dram_start;
console_verbose(); console_verbose();
memory_start = memblock_start_of_DRAM(); memory_start = memblock_start_of_DRAM();


@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
do_signal(current); do_signal(current);
} }
if (thread_info_flags & _TIF_NOTIFY_RESUME) { if (thread_info_flags & _TIF_NOTIFY_RESUME)
tracehook_notify_resume(regs); tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
} }
static unsigned long get_tm_stackpointer(struct task_struct *tsk) static unsigned long get_tm_stackpointer(struct task_struct *tsk)


@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{ {
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
} }
static void __unset_cpu_idle(struct kvm_vcpu *vcpu) static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{ {
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
} }
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)


@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu); kvm_s390_patch_guest_per_regs(vcpu);
} }
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask); clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu->arch.sie_block->icptcode = 0; vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);


@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu) static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{ {
return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
} }
static inline int kvm_is_ucontrol(struct kvm *kvm) static inline int kvm_is_ucontrol(struct kvm *kvm)


@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
static inline pmd_t *pud_pgtable(pud_t pud) static inline pmd_t *pud_pgtable(pud_t pud)
{ {
return (pmd_t *)pud_val(pud); return (pmd_t *)(unsigned long)pud_val(pud);
} }
/* only used by the stubbed out hugetlb gup code, should never be called */ /* only used by the stubbed out hugetlb gup code, should never be called */


@ -2610,7 +2610,6 @@ config PCI_OLPC
config PCI_XEN config PCI_XEN
def_bool y def_bool y
depends on PCI && XEN depends on PCI && XEN
select SWIOTLB_XEN
config MMCONF_FAM10H config MMCONF_FAM10H
def_bool y def_bool y


@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
* %rdx: src (1..8 blocks) * %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8) * %rcx: num blocks (1..8)
*/ */
FRAME_BEGIN
cmpq $5, %rcx; cmpq $5, %rcx;
jb sm4_aesni_avx_crypt4; jb sm4_aesni_avx_crypt4;
FRAME_BEGIN
vmovdqu (0 * 16)(%rdx), RA0; vmovdqu (0 * 16)(%rdx), RA0;
vmovdqu (1 * 16)(%rdx), RA1; vmovdqu (1 * 16)(%rdx), RA1;
vmovdqu (2 * 16)(%rdx), RA2; vmovdqu (2 * 16)(%rdx), RA2;


@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node); struct kvm_page_track_notifier_node *node);
}; };
void kvm_page_track_init(struct kvm *kvm); int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm); void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot); void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);


@ -2,8 +2,6 @@
#ifndef _ASM_X86_PKEYS_H #ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H #define _ASM_X86_PKEYS_H
#define ARCH_DEFAULT_PKEY 0
/* /*
* If more than 16 keys are ever supported, a thorough audit * If more than 16 keys are ever supported, a thorough audit
* will be necessary to ensure that the types that store key * will be necessary to ensure that the types that store key


@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
{ {
const struct { char _[64]; } *__src = src; const struct { char _[64]; } *__src = src;
struct { char _[64]; } __iomem *__dst = dst; struct { char _[64]; } __iomem *__dst = dst;
int zf; bool zf;
/* /*
* ENQCMDS %(rdx), rax * ENQCMDS %(rdx), rax


@ -3,14 +3,10 @@
#define _ASM_X86_SWIOTLB_XEN_H #define _ASM_X86_SWIOTLB_XEN_H
#ifdef CONFIG_SWIOTLB_XEN #ifdef CONFIG_SWIOTLB_XEN
extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void); extern int __init pci_xen_swiotlb_detect(void);
extern void __init pci_xen_swiotlb_init(void);
extern int pci_xen_swiotlb_init_late(void); extern int pci_xen_swiotlb_init_late(void);
#else #else
#define xen_swiotlb (0) #define pci_xen_swiotlb_detect NULL
static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
static inline void __init pci_xen_swiotlb_init(void) { }
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif #endif


@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup(); x86_init.oem.arch_setup();
/*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
*
* After this point, everything still needed from the boot loader or
* firmware or kernel text should be early reserved or marked not RAM in
* e820. All other memory is free game.
*
* This call needs to happen before e820__memory_setup() which calls the
* xen_memory_setup() on Xen dom0 which relies on the fact that those
* early reservations have happened already.
*/
early_reserve_memory();
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
e820__memory_setup(); e820__memory_setup();
parse_setup_data(); parse_setup_data();
@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
parse_early_param(); parse_early_param();
/*
* Do some memory reservations *before* memory is added to
* memblock, so memblock allocations won't overwrite it.
* Do it after early param, so we could get (unlikely) panic from
* serial.
*
* After this point everything still needed from the boot loader or
* firmware or kernel text should be early reserved or marked not
* RAM in e820. All other memory is free game.
*/
early_reserve_memory();
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
/* /*
* Memory used by the kernel cannot be hot-removed because Linux * Memory used by the kernel cannot be hot-removed because Linux


@ -4206,7 +4206,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt); return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE; return X86EMUL_CONTINUE;
} }


@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
stimer_init(&hv_vcpu->stimer[i], i); stimer_init(&hv_vcpu->stimer[i], i);
hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); hv_vcpu->vp_index = vcpu->vcpu_idx;
return 0; return 0;
} }
@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
switch (msr) { switch (msr) {
case HV_X64_MSR_VP_INDEX: { case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
int vcpu_idx = kvm_vcpu_get_idx(vcpu);
u32 new_vp_index = (u32)data; u32 new_vp_index = (u32)data;
if (!host || new_vp_index >= KVM_MAX_VCPUS) if (!host || new_vp_index >= KVM_MAX_VCPUS)
@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* VP index is changing, adjust num_mismatched_vp_indexes if * VP index is changing, adjust num_mismatched_vp_indexes if
* it now matches or no longer matches vcpu_idx. * it now matches or no longer matches vcpu_idx.
*/ */
if (hv_vcpu->vp_index == vcpu_idx) if (hv_vcpu->vp_index == vcpu->vcpu_idx)
atomic_inc(&hv->num_mismatched_vp_indexes); atomic_inc(&hv->num_mismatched_vp_indexes);
else if (new_vp_index == vcpu_idx) else if (new_vp_index == vcpu->vcpu_idx)
atomic_dec(&hv->num_mismatched_vp_indexes); atomic_dec(&hv->num_mismatched_vp_indexes);
hv_vcpu->vp_index = new_vp_index; hv_vcpu->vp_index = new_vp_index;


@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{ {
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu); return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
} }
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);


@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
unsigned index; unsigned index;
bool mask_before, mask_after; bool mask_before, mask_after;
union kvm_ioapic_redirect_entry *e; union kvm_ioapic_redirect_entry *e;
unsigned long vcpu_bitmap;
int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode; int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
switch (ioapic->ioregsel) { switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION: case IOAPIC_REG_VERSION:
@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
irq.shorthand = APIC_DEST_NOSHORT; irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id; irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false; irq.msi_redir_hint = false;
bitmap_zero(&vcpu_bitmap, 16); bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
&vcpu_bitmap); vcpu_bitmap);
if (old_dest_mode != e->fields.dest_mode || if (old_dest_mode != e->fields.dest_mode ||
old_dest_id != e->fields.dest_id) { old_dest_id != e->fields.dest_id) {
/* /*
@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
kvm_lapic_irq_dest_mode( kvm_lapic_irq_dest_mode(
!!e->fields.dest_mode); !!e->fields.dest_mode);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
&vcpu_bitmap); vcpu_bitmap);
} }
kvm_make_scan_ioapic_request_mask(ioapic->kvm, kvm_make_scan_ioapic_request_mask(ioapic->kvm,
&vcpu_bitmap); vcpu_bitmap);
} else { } else {
kvm_make_scan_ioapic_request(ioapic->kvm); kvm_make_scan_ioapic_request(ioapic->kvm);
} }
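The bug fixed above: a single unsigned long provides only 64 bits, while KVM_MAX_VCPUS can be well above that, so passing &vcpu_bitmap to kvm_bitmap_or_dest_vcpus() could set bits past the variable on the stack, and bitmap_zero(&vcpu_bitmap, 16) cleared only part of it anyway. DECLARE_BITMAP sizes the backing storage from the bit count; roughly (sketch, not from the patch):

        /* DECLARE_BITMAP(name, bits) expands to something like:
         *     unsigned long name[BITS_TO_LONGS(bits)];
         * so the array is large enough for every possible vCPU index. */
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);  /* clear all bits, not just 16 */
        set_bit(100, vcpu_bitmap);                /* in bounds even though 100 >= 64 */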


@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
} while (!sp->unsync_children); } while (!sp->unsync_children);
} }
static void mmu_sync_children(struct kvm_vcpu *vcpu, static int mmu_sync_children(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *parent) struct kvm_mmu_page *parent, bool can_yield)
{ {
int i; int i;
struct kvm_mmu_page *sp; struct kvm_mmu_page *sp;
@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
} }
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
if (!can_yield) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
return -EINTR;
}
cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
flush = false; flush = false;
} }
} }
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
return 0;
} }
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
} }
if (sp->unsync_children)
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp); __clear_sp_write_flooding_count(sp);
trace_get_page: trace_get_page:
@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
write_lock(&vcpu->kvm->mmu_lock); write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
mmu_sync_children(vcpu, sp); mmu_sync_children(vcpu, sp, true);
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
write_unlock(&vcpu->kvm->mmu_lock); write_unlock(&vcpu->kvm->mmu_lock);
@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
if (IS_VALID_PAE_ROOT(root)) { if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK; root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root); sp = to_shadow_page(root);
mmu_sync_children(vcpu, sp); mmu_sync_children(vcpu, sp, true);
} }
} }


@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
cleanup_srcu_struct(&head->track_srcu); cleanup_srcu_struct(&head->track_srcu);
} }
void kvm_page_track_init(struct kvm *kvm) int kvm_page_track_init(struct kvm *kvm)
{ {
struct kvm_page_track_notifier_head *head; struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head; head = &kvm->arch.track_notifier_head;
init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list); INIT_HLIST_HEAD(&head->track_notifier_list);
return init_srcu_struct(&head->track_srcu);
} }
/* /*


@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
if (!is_shadow_present_pte(*it.sptep)) { if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2]; table_gfn = gw->table_gfn[it.level - 2];
access = gw->pt_access[it.level - 2]; access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
false, access); it.level-1, false, access);
/*
* We must synchronize the pagetable before linking it
* because the guest doesn't need to flush tlb when
* the gpte is changed from non-present to present.
* Otherwise, the guest may use the wrong mapping.
*
* For PG_LEVEL_4K, kvm_mmu_get_page() has already
* synchronized it transiently via kvm_sync_page().
*
* For higher level pagetable, we synchronize it via
* the slower mmu_sync_children(). If it needs to
* break, some progress has been made; return
* RET_PF_RETRY and retry on the next #PF.
* KVM_REQ_MMU_SYNC is not necessary but it
* expedites the process.
*/
if (sp->unsync_children &&
mmu_sync_children(vcpu, sp, false))
return RET_PF_RETRY;
} }
/* /*
@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
* Using the cached information from sp->gfns is safe because: * Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn * - The spte has a reference to the struct page, so the pfn for a given gfn
* can't change unless all sptes pointing to it are nuked first. * can't change unless all sptes pointing to it are nuked first.
*
* Note:
* We should flush all tlbs if spte is dropped even though guest is
* responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
* and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
* used by guest then tlbs are not flushed, so guest is allowed to access the
* freed pages.
* And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
*/ */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ {
@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return 0; return 0;
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
/* set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
* Update spte before increasing tlbs_dirty to make
* sure no tlb flush is lost after spte is zapped; see
* the comments in kvm_flush_remote_tlbs().
*/
smp_wmb();
vcpu->kvm->tlbs_dirty++;
continue; continue;
} }
@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (gfn != sp->gfns[i]) { if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i]); drop_spte(vcpu->kvm, &sp->spt[i]);
/* set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
* The same as above where we are doing
* prefetch_invalid_gpte().
*/
smp_wmb();
vcpu->kvm->tlbs_dirty++;
continue; continue;
} }


@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits); (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
svm->vmcb->control.int_state = svm->nested.ctl.int_state; svm->vmcb->control.int_state = svm->nested.ctl.int_state;
svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
} }
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
struct vmcb *vmcb12) struct vmcb *vmcb12, bool from_vmrun)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
int ret; int ret;
@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12); nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
nested_npt_enabled(svm), true); nested_npt_enabled(svm), from_vmrun);
if (ret) if (ret)
return ret; return ret;
if (!npt_enabled) if (!npt_enabled)
vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested; vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
if (!from_vmrun)
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
svm_set_gif(svm, true); svm_set_gif(svm, true);
return 0; return 0;
@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
svm->nested.nested_run_pending = 1; svm->nested.nested_run_pending = 1;
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12)) if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
goto out_exit_err; goto out_exit_err;
if (nested_svm_vmrun_msrpm(svm)) if (nested_svm_vmrun_msrpm(svm))


@ -595,43 +595,50 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
return 0; return 0;
} }
static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
int *error)
{
struct sev_data_launch_update_vmsa vmsa;
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
/* Perform some pre-encryption checks against the VMSA */
ret = sev_es_sync_vmsa(svm);
if (ret)
return ret;
/*
* The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
* the VMSA memory content (i.e it will write the same memory region
* with the guest's key), so invalidate it first.
*/
clflush_cache_range(svm->vmsa, PAGE_SIZE);
vmsa.reserved = 0;
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
vmsa.address = __sme_pa(svm->vmsa);
vmsa.len = PAGE_SIZE;
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
}
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{ {
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_update_vmsa vmsa;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
int i, ret; int i, ret;
if (!sev_es_guest(kvm)) if (!sev_es_guest(kvm))
return -ENOTTY; return -ENOTTY;
vmsa.reserved = 0;
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
struct vcpu_svm *svm = to_svm(vcpu); ret = mutex_lock_killable(&vcpu->mutex);
/* Perform some pre-encryption checks against the VMSA */
ret = sev_es_sync_vmsa(svm);
if (ret) if (ret)
return ret; return ret;
/* ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
* The LAUNCH_UPDATE_VMSA command will perform in-place
* encryption of the VMSA memory content (i.e it will write
* the same memory region with the guest's key), so invalidate
* it first.
*/
clflush_cache_range(svm->vmsa, PAGE_SIZE);
vmsa.handle = sev->handle; mutex_unlock(&vcpu->mutex);
vmsa.address = __sme_pa(svm->vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
&argp->error);
if (ret) if (ret)
return ret; return ret;
svm->vcpu.arch.guest_state_protected = true;
} }
return 0; return 0;
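Two properties of the rework above are worth noting: the per-vCPU body now lives in __sev_launch_update_vmsa(), and the loop takes each vCPU's mutex with mutex_lock_killable() so a fatal signal can interrupt what may be a long run of LAUNCH_UPDATE_VMSA firmware calls. The loop shape is roughly (sketch, error handling abbreviated):

        kvm_for_each_vcpu(i, vcpu, kvm) {
                ret = mutex_lock_killable(&vcpu->mutex);
                if (ret)
                        return ret;     /* -EINTR: the task was killed */

                ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

                mutex_unlock(&vcpu->mutex);
                if (ret)
                        return ret;
        }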
@ -1397,8 +1404,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */ /* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error); ret = sev_bind_asid(kvm, start.handle, error);
if (ret) if (ret) {
sev_decommission(start.handle);
goto e_free_session; goto e_free_session;
}
params.handle = start.handle; params.handle = start.handle;
if (copy_to_user((void __user *)(uintptr_t)argp->data, if (copy_to_user((void __user *)(uintptr_t)argp->data,
@ -1464,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */ /* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0); PAGE_SIZE, &n, 1);
if (IS_ERR(guest_page)) { if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page); ret = PTR_ERR(guest_page);
goto e_free_trans; goto e_free_trans;
@ -1501,6 +1510,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
} }
static bool cmd_allowed_from_miror(u32 cmd_id)
{
/*
* Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
* active mirror VMs. Also allow the debugging and status commands.
*/
if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
cmd_id == KVM_SEV_DBG_ENCRYPT)
return true;
return false;
}
int svm_mem_enc_op(struct kvm *kvm, void __user *argp) int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{ {
struct kvm_sev_cmd sev_cmd; struct kvm_sev_cmd sev_cmd;
@ -1517,8 +1540,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
/* enc_context_owner handles all memory enc operations */ /* Only the enc_context_owner handles some memory enc operations. */
if (is_mirroring_enc_context(kvm)) { if (is_mirroring_enc_context(kvm) &&
!cmd_allowed_from_miror(sev_cmd.id)) {
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
@ -1715,8 +1739,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{ {
struct file *source_kvm_file; struct file *source_kvm_file;
struct kvm *source_kvm; struct kvm *source_kvm;
struct kvm_sev_info *mirror_sev; struct kvm_sev_info source_sev, *mirror_sev;
unsigned int asid;
int ret; int ret;
source_kvm_file = fget(source_fd); source_kvm_file = fget(source_fd);
@ -1739,7 +1762,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
goto e_source_unlock; goto e_source_unlock;
} }
asid = to_kvm_svm(source_kvm)->sev_info.asid; memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
sizeof(source_sev));
/* /*
* The mirror kvm holds an enc_context_owner ref so its asid can't * The mirror kvm holds an enc_context_owner ref so its asid can't
@ -1759,8 +1783,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
/* Set enc_context_owner and copy its encryption context over */ /* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info; mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm; mirror_sev->enc_context_owner = source_kvm;
mirror_sev->asid = asid;
mirror_sev->active = true; mirror_sev->active = true;
mirror_sev->asid = source_sev.asid;
mirror_sev->fd = source_sev.fd;
mirror_sev->es_active = source_sev.es_active;
mirror_sev->handle = source_sev.handle;
/*
* Do not copy ap_jump_table. Since the mirror does not share the same
* KVM contexts as the original, and they may have different
* memory-views.
*/
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
return 0; return 0;


@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
V_IRQ_INJECTION_BITS_MASK; V_IRQ_INJECTION_BITS_MASK;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
} }
vmcb_mark_dirty(svm->vmcb, VMCB_INTR); vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (error_code) if (error_code)
goto reinject; goto reinject;
/* All SVM instructions expect page aligned RAX */
if (svm->vmcb->save.rax & ~PAGE_MASK)
goto reinject;
/* Decode the instruction for usage later */ /* Decode the instruction for usage later */
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject; goto reinject;
@ -4285,43 +4291,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
struct kvm_host_map map_save; struct kvm_host_map map_save;
int ret; int ret;
if (is_guest_mode(vcpu)) { if (!is_guest_mode(vcpu))
/* FED8h - SVM Guest */ return 0;
put_smstate(u64, smstate, 0x7ed8, 1);
/* FEE0h - SVM Guest VMCB Physical Address */
put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; /* FED8h - SVM Guest */
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; put_smstate(u64, smstate, 0x7ed8, 1);
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; /* FEE0h - SVM Guest VMCB Physical Address */
put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
ret = nested_svm_vmexit(svm); svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
if (ret) svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
return ret; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
/* ret = nested_svm_vmexit(svm);
* KVM uses VMCB01 to store L1 host state while L2 runs but if (ret)
* VMCB01 is going to be used during SMM and thus the state will return ret;
* be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
* area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
* format of the area is identical to guest save area offsetted
* by 0x400 (matches the offset of 'struct vmcb_save_area'
* within 'struct vmcb'). Note: HSAVE area may also be used by
* L1 hypervisor to save additional host context (e.g. KVM does
* that, see svm_prepare_guest_switch()) which must be
* preserved.
*/
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
&map_save) == -EINVAL)
return 1;
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); /*
* KVM uses VMCB01 to store L1 host state while L2 runs but
* VMCB01 is going to be used during SMM and thus the state will
* be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
* area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
* format of the area is identical to guest save area offsetted
* by 0x400 (matches the offset of 'struct vmcb_save_area'
* within 'struct vmcb'). Note: HSAVE area may also be used by
* L1 hypervisor to save additional host context (e.g. KVM does
* that, see svm_prepare_guest_switch()) which must be
* preserved.
*/
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
&map_save) == -EINVAL)
return 1;
svm_copy_vmrun_state(map_save.hva + 0x400, BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
&svm->vmcb01.ptr->save);
kvm_vcpu_unmap(vcpu, &map_save, true); svm_copy_vmrun_state(map_save.hva + 0x400,
} &svm->vmcb01.ptr->save);
kvm_vcpu_unmap(vcpu, &map_save, true);
return 0; return 0;
} }
@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map, map_save; struct kvm_host_map map, map_save;
int ret = 0; u64 saved_efer, vmcb12_gpa;
struct vmcb *vmcb12;
int ret;
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); return 0;
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
struct vmcb *vmcb12;
if (guest) { /* Non-zero if SMI arrived while vCPU was in guest mode. */
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) if (!GET_SMSTATE(u64, smstate, 0x7ed8))
return 1; return 0;
if (!(saved_efer & EFER_SVME)) if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
return 1; return 1;
if (kvm_vcpu_map(vcpu, saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) if (!(saved_efer & EFER_SVME))
return 1; return 1;
if (svm_allocate_nested(svm)) vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
return 1; if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
return 1;
vmcb12 = map.hva; ret = 1;
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
goto unmap_map;
nested_load_control_from_vmcb12(svm, &vmcb12->control); if (svm_allocate_nested(svm))
goto unmap_save;
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12); /*
kvm_vcpu_unmap(vcpu, &map, true); * Restore L1 host state from L1 HSAVE area as VMCB01 was
* used during SMM (see svm_enter_smm())
*/
/* svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
* Restore L1 host state from L1 HSAVE area as VMCB01 was
* used during SMM (see svm_enter_smm())
*/
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
&map_save) == -EINVAL)
return 1;
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, /*
map_save.hva + 0x400); * Enter the nested guest now
*/
kvm_vcpu_unmap(vcpu, &map_save, true); vmcb12 = map.hva;
} nested_load_control_from_vmcb12(svm, &vmcb12->control);
} ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
kvm_vcpu_unmap(vcpu, &map, true);
return ret; return ret;
} }


@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
} }
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12); int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm); void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm);

View File

@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
switch (msr_index) { switch (msr_index) {
case MSR_IA32_VMX_EXIT_CTLS: case MSR_IA32_VMX_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS:
ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
break; break;
case MSR_IA32_VMX_ENTRY_CTLS: case MSR_IA32_VMX_ENTRY_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
break; break;
case MSR_IA32_VMX_PROCBASED_CTLS2: case MSR_IA32_VMX_PROCBASED_CTLS2:
ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
break;
case MSR_IA32_VMX_PINBASED_CTLS:
ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
break;
case MSR_IA32_VMX_VMFUNC:
ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
break; break;
} }


@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* Guest state is invalid and unrestricted guest is disabled, * Guest state is invalid and unrestricted guest is disabled,
* which means L1 attempted VMEntry to L2 with invalid state. * which means L1 attempted VMEntry to L2 with invalid state.
* Fail the VMEntry. * Fail the VMEntry.
*
* However when force loading the guest state (SMM exit or
* loading nested state after migration, it is possible to
* have invalid guest state now, which will be later fixed by
* restoring L2 register state
*/ */
if (CC(!vmx_guest_state_valid(vcpu))) { if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT; *entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL; return -EINVAL;
} }
@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count)) vmcs12->vm_exit_msr_load_count))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
} }
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@ -4899,14 +4906,7 @@ out_vmcs02:
return -ENOMEM; return -ENOMEM;
} }
/* /* Emulate the VMXON instruction. */
* Emulate the VMXON instruction.
* Currently, we just remember that VMX is active, and do not save or even
* inspect the argument to VMXON (the so-called "VMXON pointer") because we
* do not currently need to store anything in that guest-allocated memory
* region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
* argument is different from the VMXON pointer (which the spec says they do).
*/
static int handle_vmon(struct kvm_vcpu *vcpu) static int handle_vmon(struct kvm_vcpu *vcpu)
{ {
int ret; int ret;
@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
case EXIT_REASON_VMFUNC: case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */ /* VM functions are emulated through L2->L0 vmexits. */
return true; return true;
case EXIT_REASON_BUS_LOCK:
/*
* At present, bus lock VM exit is never exposed to L1.
* Handle L2's bus locks in L0 directly.
*/
return true;
default: default:
break; break;
} }


@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmx_prepare_switch_to_host(to_vmx(vcpu)); vmx_prepare_switch_to_host(to_vmx(vcpu));
} }
static bool emulation_required(struct kvm_vcpu *vcpu) bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{ {
return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu); return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
} }
@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmcs_writel(GUEST_RFLAGS, rflags); vmcs_writel(GUEST_RFLAGS, rflags);
if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
vmx->emulation_required = emulation_required(vcpu); vmx->emulation_required = vmx_emulation_required(vcpu);
} }
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
&msr_info->data)) &msr_info->data))
return 1; return 1;
/* /*
* Enlightened VMCS v1 doesn't have certain fields, but buggy * Enlightened VMCS v1 doesn't have certain VMCS fields but
* Hyper-V versions are still trying to use corresponding * instead of just ignoring the features, different Hyper-V
* features when they are exposed. Filter out the essential * versions are either trying to use them and fail or do some
* minimum. * sanity checking and refuse to boot. Filter all unsupported
* features out.
*/ */
if (!msr_info->host_initiated && if (!msr_info->host_initiated &&
vmx->nested.enlightened_vmcs_enabled) vmx->nested.enlightened_vmcs_enabled)
@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
} }
/* depends on vcpu->arch.cr0 to be set to a new value */ /* depends on vcpu->arch.cr0 to be set to a new value */
vmx->emulation_required = emulation_required(vcpu); vmx->emulation_required = vmx_emulation_required(vcpu);
} }
static int vmx_get_max_tdp_level(void) static int vmx_get_max_tdp_level(void)
@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
{ {
__vmx_set_segment(vcpu, var, seg); __vmx_set_segment(vcpu, var, seg);
to_vmx(vcpu)->emulation_required = emulation_required(vcpu); to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
} }
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@ -6621,10 +6622,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->soft_vnmi_blocked)) vmx->loaded_vmcs->soft_vnmi_blocked))
vmx->loaded_vmcs->entry_time = ktime_get(); vmx->loaded_vmcs->entry_time = ktime_get();
/* Don't enter VMX if guest state is invalid, let the exit handler /*
start emulation until we arrive back to a valid state */ * Don't enter VMX if guest state is invalid, let the exit handler
if (vmx->emulation_required) * start emulation until we arrive back to a valid state. Synthesize a
* consistency check VM-Exit due to invalid guest state and bail.
*/
if (unlikely(vmx->emulation_required)) {
/* We don't emulate invalid state of a nested guest */
vmx->fail = is_guest_mode(vcpu);
vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
vmx->exit_reason.failed_vmentry = 1;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
vmx->exit_intr_info = 0;
return EXIT_FASTPATH_NONE; return EXIT_FASTPATH_NONE;
}
trace_kvm_entry(vcpu); trace_kvm_entry(vcpu);


@ -248,12 +248,8 @@ struct vcpu_vmx {
* only loaded into hardware when necessary, e.g. SYSCALL #UDs outside * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
* of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
* be loaded into hardware if those conditions aren't met. * be loaded into hardware if those conditions aren't met.
* nr_active_uret_msrs tracks the number of MSRs that need to be loaded
* into hardware when running the guest. guest_uret_msrs[] is resorted
* whenever the number of "active" uret MSRs is modified.
*/ */
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS]; struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
int nr_active_uret_msrs;
bool guest_uret_msrs_loaded; bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base; u64 msr_host_kernel_gs_base;
@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
unsigned long fs_base, unsigned long gs_base); unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu); int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu); unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu); u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);


@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
}; };
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
offsetof(struct compat_vcpu_info, time)); offsetof(struct compat_vcpu_info, time));
if (vcpu->xen.vcpu_time_info_set) if (vcpu->xen.vcpu_time_info_set)
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
if (v == kvm_get_vcpu(v->kvm, 0)) if (!v->vcpu_idx)
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0; return 0;
} }
@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
/* Process a latched INIT or SMI, if any. */ /* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu);
/*
* Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
* on SMM exit we still need to reload them from
* guest memory
*/
vcpu->arch.pdptrs_from_userspace = false;
} }
kvm_mmu_reset_context(vcpu); kvm_mmu_reset_context(vcpu);
@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int r; int r;
vcpu->arch.last_vmentry_cpu = -1; vcpu->arch.last_vmentry_cpu = -1;
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0); kvm_rip_write(vcpu, 0xfff0);
vcpu->arch.cr3 = 0;
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
/* /*
* CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions
* of Intel's SDM list CD/NW as being set on INIT, but they contradict * of Intel's SDM list CD/NW as being set on INIT, but they contradict
@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{ {
int ret;
if (type) if (type)
return -EINVAL; return -EINVAL;
ret = kvm_page_track_init(kvm);
if (ret)
return ret;
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_apicv_init(kvm); kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm); kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm); kvm_mmu_init_vm(kvm);
kvm_xen_init_vm(kvm); kvm_xen_init_vm(kvm);


@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \ #define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \ #define __peek_nbyte_next(t, insn, n) \
({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); }) ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
#define get_next(t, insn) \ #define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); }) ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
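The switch to memcpy() matters because the old form dereferenced the instruction byte stream through a cast to type t, which assumes unaligned loads of t are fine and is flagged by the undefined-behaviour sanitizer; copying sizeof(t) bytes is the portable way to spell a possibly-unaligned little-endian load, and the compiler still lowers it to a single move on x86. The same idiom outside these macros looks like this (illustrative helper, not part of the patch):

        #include <stdint.h>
        #include <string.h>

        /* Read a 32-bit little-endian value from an arbitrarily aligned buffer. */
        static inline uint32_t read_le32(const unsigned char *p)
        {
                uint32_t v;

                memcpy(&v, p, sizeof(v));   /* no alignment assumption */
                return v;                   /* x86 is little-endian, no swap needed */
        }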


@@ -710,7 +710,8 @@ oops:
 static noinline void
 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
-unsigned long address, int signal, int si_code)
+unsigned long address, int signal, int si_code,
+u32 pkey)
 {
 WARN_ON_ONCE(user_mode(regs));
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 set_signal_archinfo(address, error_code);
-/* XXX: hwpoison faults will set the wrong code. */
-force_sig_fault(signal, si_code, (void __user *)address);
+if (si_code == SEGV_PKUERR) {
+force_sig_pkuerr((void __user *)address, pkey);
+} else {
+/* XXX: hwpoison faults will set the wrong code. */
+force_sig_fault(signal, si_code, (void __user *)address);
+}
 }
 /*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 struct task_struct *tsk = current;
 if (!user_mode(regs)) {
-kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+kernelmode_fixup_or_oops(regs, error_code, address,
+SIGSEGV, si_code, pkey);
 return;
 }
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 {
 /* Kernel mode? Handle exceptions or die: */
 if (!user_mode(regs)) {
-kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+kernelmode_fixup_or_oops(regs, error_code, address,
+SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
 return;
 }
@@ -1396,7 +1403,8 @@ good_area:
 */
 if (!user_mode(regs))
 kernelmode_fixup_or_oops(regs, error_code, address,
-SIGBUS, BUS_ADRERR);
+SIGBUS, BUS_ADRERR,
+ARCH_DEFAULT_PKEY);
 return;
 }
@@ -1416,7 +1424,8 @@ good_area:
 return;
 if (fatal_signal_pending(current) && !user_mode(regs)) {
-kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+kernelmode_fixup_or_oops(regs, error_code, address,
+0, 0, ARCH_DEFAULT_PKEY);
 return;
 }
@@ -1424,7 +1433,8 @@ good_area:
 /* Kernel mode? Handle exceptions or die: */
 if (!user_mode(regs)) {
 kernelmode_fixup_or_oops(regs, error_code, address,
-SIGSEGV, SEGV_MAPERR);
+SIGSEGV, SEGV_MAPERR,
+ARCH_DEFAULT_PKEY);
 return;
 }

View File

@@ -1334,9 +1334,10 @@ st: if (is_imm8(insn->off))
 if (insn->imm == (BPF_AND | BPF_FETCH) ||
 insn->imm == (BPF_OR | BPF_FETCH) ||
 insn->imm == (BPF_XOR | BPF_FETCH)) {
-u8 *branch_target;
 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
 u32 real_src_reg = src_reg;
+u32 real_dst_reg = dst_reg;
+u8 *branch_target;
 /*
 * Can't be implemented with a single x86 insn.
@@ -1347,11 +1348,13 @@ st: if (is_imm8(insn->off))
 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
 if (src_reg == BPF_REG_0)
 real_src_reg = BPF_REG_AX;
+if (dst_reg == BPF_REG_0)
+real_dst_reg = BPF_REG_AX;
 branch_target = prog;
 /* Load old value */
 emit_ldx(&prog, BPF_SIZE(insn->code),
-BPF_REG_0, dst_reg, insn->off);
+BPF_REG_0, real_dst_reg, insn->off);
 /*
 * Perform the (commutative) operation locally,
 * put the result in the AUX_REG.
@@ -1362,7 +1365,8 @@ st: if (is_imm8(insn->off))
 add_2reg(0xC0, AUX_REG, real_src_reg));
 /* Attempt to swap in new value */
 err = emit_atomic(&prog, BPF_CMPXCHG,
-dst_reg, AUX_REG, insn->off,
+real_dst_reg, AUX_REG,
+insn->off,
 BPF_SIZE(insn->code));
 if (WARN_ON(err))
 return err;
@@ -1376,11 +1380,10 @@ st: if (is_imm8(insn->off))
 /* Restore R0 after clobbering RAX */
 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
 break;
 }
 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
 insn->off, BPF_SIZE(insn->code));
 if (err)
 return err;
 break;
@@ -1737,7 +1740,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-struct bpf_prog *p, int stack_size, bool mod_ret)
+struct bpf_prog *p, int stack_size, bool save_ret)
 {
 u8 *prog = *pprog;
 u8 *jmp_insn;
@@ -1770,11 +1773,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 if (emit_call(&prog, p->bpf_func, prog))
 return -EINVAL;
-/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+/*
+* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
 * of the previous call which is then passed on the stack to
 * the next BPF program.
+*
+* BPF_TRAMP_FENTRY trampoline may need to return the return
+* value of BPF_PROG_TYPE_STRUCT_OPS prog.
 */
-if (mod_ret)
+if (save_ret)
 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 /* replace 2 nops with JE insn, since jmp target is known */
@@ -1821,13 +1828,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-struct bpf_tramp_progs *tp, int stack_size)
+struct bpf_tramp_progs *tp, int stack_size,
+bool save_ret)
 {
 int i;
 u8 *prog = *pprog;
 for (i = 0; i < tp->nr_progs; i++) {
-if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+save_ret))
 return -EINVAL;
 }
 *pprog = prog;
@@ -1870,6 +1879,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 return 0;
 }
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+(flags & BPF_TRAMP_F_SKIP_FRAME))
+return false;
+/*
+* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+* and it must be used alone.
+*/
+if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+(flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+return false;
+return true;
+}
 /* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
@@ -1942,17 +1968,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
 u8 **branches = NULL;
 u8 *prog;
+bool save_ret;
 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
 if (nr_args > 6)
 return -ENOTSUPP;
-if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-(flags & BPF_TRAMP_F_SKIP_FRAME))
+if (!is_valid_bpf_tramp_flags(flags))
 return -EINVAL;
-if (flags & BPF_TRAMP_F_CALL_ORIG)
-stack_size += 8; /* room for return value of orig_call */
+/* room for return value of orig_call or fentry prog */
+save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+if (save_ret)
+stack_size += 8;
 if (flags & BPF_TRAMP_F_IP_ARG)
 stack_size += 8; /* room for IP address argument */
@@ -1998,7 +2026,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 }
 if (fentry->nr_progs)
-if (invoke_bpf(m, &prog, fentry, stack_size))
+if (invoke_bpf(m, &prog, fentry, stack_size,
+flags & BPF_TRAMP_F_RET_FENTRY_RET))
 return -EINVAL;
 if (fmod_ret->nr_progs) {
@@ -2045,7 +2074,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 }
 if (fexit->nr_progs)
-if (invoke_bpf(m, &prog, fexit, stack_size)) {
+if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
 ret = -EINVAL;
 goto cleanup;
 }
@@ -2065,9 +2094,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 ret = -EINVAL;
 goto cleanup;
 }
-/* restore original return value back into RAX */
-emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 }
+/* restore return value of orig_call or fentry prog back into RAX */
+if (save_ret)
+emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 EMIT1(0x5B); /* pop rbx */
 EMIT1(0xC9); /* leave */

View File

@@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 preempt_enable();
 }
-static void xen_convert_trap_info(const struct desc_ptr *desc,
-struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+struct trap_info *traps, bool full)
 {
 unsigned in, out, count;
@@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
 for (in = out = 0; in < count; in++) {
 gate_desc *entry = (gate_desc *)(desc->address) + in;
-if (cvt_gate_to_trap(in, entry, &traps[out]))
+if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
 out++;
 }
-traps[out].address = 0;
+return out;
 }
 void xen_copy_trap_info(struct trap_info *traps)
 {
 const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
-xen_convert_trap_info(desc, traps);
+xen_convert_trap_info(desc, traps, true);
 }
 /* Load a new IDT into Xen. In principle this can be per-CPU, so we
@@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
 {
 static DEFINE_SPINLOCK(lock);
 static struct trap_info traps[257];
+unsigned out;
 trace_xen_cpu_load_idt(desc);
@@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
 memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
-xen_convert_trap_info(desc, traps);
+out = xen_convert_trap_info(desc, traps, false);
+memset(&traps[out], 0, sizeof(traps[0]));
 xen_mc_flush();
 if (HYPERVISOR_set_trap_table(traps))

View File

@@ -18,7 +18,7 @@
 #endif
 #include <linux/export.h>
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
 /*
 * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
 return xen_swiotlb;
 }
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
 {
 if (xen_swiotlb) {
 xen_swiotlb_init_early();

View File

@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 gdt = get_cpu_gdt_rw(cpu);
-memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 /*
 * Bring up the CPU in cpu_bringup_and_idle() with the stack
 * pointing just below where pt_regs would be if it were a normal
@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 xen_copy_trap_info(ctxt->trap_ctxt);
-ctxt->ldt_ents = 0;
 BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 gdt_mfn = arbitrary_virt_to_mfn(gdt);

View File

@@ -1466,7 +1466,7 @@ again:
 if (!bio_integrity_endio(bio))
 return;
-if (bio->bi_bdev)
+if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
 rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {

View File

@@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
 .llseek = default_llseek,
 };
+static void bsg_device_release(struct device *dev)
+{
+struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+kfree(bd);
+}
 void bsg_unregister_queue(struct bsg_device *bd)
 {
 if (bd->queue->kobj.sd)
 sysfs_remove_link(&bd->queue->kobj, "bsg");
 cdev_device_del(&bd->cdev, &bd->device);
-ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-kfree(bd);
+put_device(&bd->device);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
@@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 if (ret < 0) {
 if (ret == -ENOSPC)
 dev_err(parent, "bsg: too many bsg devices\n");
-goto out_kfree;
+kfree(bd);
+return ERR_PTR(ret);
 }
 bd->device.devt = MKDEV(bsg_major, ret);
 bd->device.class = bsg_class;
 bd->device.parent = parent;
+bd->device.release = bsg_device_release;
 dev_set_name(&bd->device, "%s", name);
 device_initialize(&bd->device);
@@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 bd->cdev.owner = THIS_MODULE;
 ret = cdev_device_add(&bd->cdev, &bd->device);
 if (ret)
-goto out_ida_remove;
+goto out_put_device;
 if (q->kobj.sd) {
 ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 out_device_del:
 cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
-ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
-kfree(bd);
+out_put_device:
+put_device(&bd->device);
 return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);

View File

@@ -14,6 +14,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <linux/suspend.h>
+#include <linux/fs.h>
 #include "blk.h"
 static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 loff_t len)
 {
-struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+struct inode *inode = bdev_file_inode(file);
+struct block_device *bdev = I_BDEV(inode);
 loff_t end = start + len - 1;
 loff_t isize;
 int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
 return -EINVAL;
+filemap_invalidate_lock(inode->i_mapping);
 /* Invalidate the page cache, including dirty pages. */
 error = truncate_bdev_range(bdev, file->f_mode, start, end);
 if (error)
-return error;
+goto fail;
 switch (mode) {
 case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 GFP_KERNEL, 0);
 break;
 default:
-return -EOPNOTSUPP;
+error = -EOPNOTSUPP;
 }
-if (error)
-return error;
-/*
-* Invalidate the page cache again; if someone wandered in and dirtied
-* a page, we just discard it - userspace has no way of knowing whether
-* the write happened before or after discard completing...
-*/
-return truncate_bdev_range(bdev, file->f_mode, start, end);
+fail:
+filemap_invalidate_unlock(inode->i_mapping);
+return error;
 }
 const struct file_operations def_blk_fops = {

View File

@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 #define should_use_kmap(pfn) page_is_ram(pfn)
 #endif
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
-bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 {
 unsigned long pfn;
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
 return NULL;
 return (void __iomem __force *)kmap(pfn_to_page(pfn));
 } else
-return memory ? acpi_os_memmap(pg_off, pg_sz) :
-acpi_os_ioremap(pg_off, pg_sz);
+return acpi_os_ioremap(pg_off, pg_sz);
 }
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 /**
-* __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+* acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
-* @memory: true if remapping memory, false if IO
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
 struct acpi_ioremap *map;
 void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
 pg_off = round_down(phys, PAGE_SIZE);
 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-virt = acpi_map(phys, size, memory);
+virt = acpi_map(phys, size);
 if (!virt) {
 mutex_unlock(&acpi_ioremap_lock);
 kfree(map);
@@ -375,17 +372,11 @@ out:
 mutex_unlock(&acpi_ioremap_lock);
 return map->virt + (phys - map->phys);
 }
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
-return __acpi_os_map_iomem(phys, size, false);
-}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-return (void *)__acpi_os_map_iomem(phys, size, true);
+return (void *)acpi_os_map_iomem(phys, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);

View File

@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
 }
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+struct binder_thread *thread,
 struct binder_buffer *buffer,
 binder_size_t failed_at,
 bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 &proc->alloc, &fd, buffer,
 offset, sizeof(fd));
 WARN_ON(err);
-if (!err)
+if (!err) {
 binder_deferred_fd_close(fd);
+/*
+* Need to make sure the thread goes
+* back to userspace to complete the
+* deferred close
+*/
+if (thread)
+thread->looper_need_return = true;
+}
 }
 } break;
 default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
 if (reply) {
 binder_enqueue_thread_work(thread, tcomplete);
 binder_inner_proc_lock(target_proc);
-if (target_thread->is_dead || target_proc->is_frozen) {
-return_error = target_thread->is_dead ?
-BR_DEAD_REPLY : BR_FROZEN_REPLY;
+if (target_thread->is_dead) {
+return_error = BR_DEAD_REPLY;
 binder_inner_proc_unlock(target_proc);
 goto err_dead_proc_or_thread;
 }
@@ -3105,7 +3113,7 @@ err_bad_parent:
 err_copy_data_failed:
 binder_free_txn_fixups(t);
 trace_binder_transaction_failed_buffer_release(t->buffer);
-binder_transaction_buffer_release(target_proc, t->buffer,
+binder_transaction_buffer_release(target_proc, NULL, t->buffer,
 buffer_offset, true);
 if (target_node)
 binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
 * Cleanup buffer and free it.
 */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+struct binder_thread *thread,
+struct binder_buffer *buffer)
 {
 binder_inner_proc_lock(proc);
 if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
 binder_node_inner_unlock(buf_node);
 }
 trace_binder_transaction_buffer_release(buffer);
-binder_transaction_buffer_release(proc, buffer, 0, false);
+binder_transaction_buffer_release(proc, thread, buffer, 0, false);
 binder_alloc_free_buf(&proc->alloc, buffer);
 }
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
 proc->pid, thread->pid, (u64)data_ptr,
 buffer->debug_id,
 buffer->transaction ? "active" : "finished");
-binder_free_buf(proc, buffer);
+binder_free_buf(proc, thread, buffer);
 break;
 }
@@ -4107,7 +4117,7 @@ retry:
 buffer->transaction = NULL;
 binder_cleanup_transaction(t, "fd fixups failed",
 BR_FAILED_REPLY);
-binder_free_buf(proc, buffer);
+binder_free_buf(proc, thread, buffer);
 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
 proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 return 0;
 }
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+struct rb_node *n;
+struct binder_thread *thread;
+if (proc->outstanding_txns > 0)
+return true;
+for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+thread = rb_entry(n, struct binder_thread, rb_node);
+if (thread->transaction_stack)
+return true;
+}
+return false;
+}
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
 struct binder_proc *target_proc)
 {
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
 (!target_proc->outstanding_txns),
 msecs_to_jiffies(info->timeout_ms));
-if (!ret && target_proc->outstanding_txns)
-ret = -EAGAIN;
+/* Check pending transactions that wait for reply */
+if (ret >= 0) {
+binder_inner_proc_lock(target_proc);
+if (binder_txns_pending_ilocked(target_proc))
+ret = -EAGAIN;
+binder_inner_proc_unlock(target_proc);
+}
 if (ret < 0) {
 binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
 {
 struct binder_proc *target_proc;
 bool found = false;
+__u32 txns_pending;
 info->sync_recv = 0;
 info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
 if (target_proc->pid == info->pid) {
 found = true;
 binder_inner_proc_lock(target_proc);
-info->sync_recv |= target_proc->sync_recv;
+txns_pending = binder_txns_pending_ilocked(target_proc);
+info->sync_recv |= target_proc->sync_recv |
+(txns_pending << 1);
 info->async_recv |= target_proc->async_recv;
 binder_inner_proc_unlock(target_proc);
 }

View File

@@ -378,6 +378,8 @@ struct binder_ref {
 * binder transactions
 * (protected by @inner_lock)
 * @sync_recv: process received sync transactions since last frozen
+* bit 0: received sync transaction after being frozen
+* bit 1: new pending sync transaction during freezing
 * (protected by @inner_lock)
 * @async_recv: process received async transactions since last frozen
 * (protected by @inner_lock)

View File

@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
 to_swnode(fwnode)->managed = true;
 set_secondary_fwnode(dev, fwnode);
+if (device_is_registered(dev))
+software_node_notify(dev);
 return 0;
 }
 EXPORT_SYMBOL_GPL(device_create_managed_software_node);

View File

@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
 mutex_lock(&dev->mutex);
 rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
 mutex_unlock(&dev->mutex);
+kfree(insns);
 return rc;
 }

View File

@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 in_place ? DMA_BIDIRECTIONAL
 : DMA_TO_DEVICE);
 if (ret)
-goto e_ctx;
+goto e_aad;
 if (in_place) {
 dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 op.u.aes.size = 0;
 ret = cmd_q->ccp->vdata->perform->aes(&op);
 if (ret)
-goto e_dst;
+goto e_final_wa;
 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 /* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 DMA_BIDIRECTIONAL);
 if (ret)
-goto e_tag;
+goto e_final_wa;
 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-if (ret)
-goto e_tag;
+if (ret) {
+ccp_dm_free(&tag);
+goto e_final_wa;
+}
 ret = crypto_memneq(tag.address, final_wa.address,
 authsize) ? -EBADMSG : 0;
 ccp_dm_free(&tag);
 }
-e_tag:
+e_final_wa:
 ccp_dm_free(&final_wa);
 e_dst:

View File

@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
 dimm->grain = pvt->mem_width_in_bytes;
 dimm->dtype = dt;
 dimm->mtype = mt;
-dimm->edac_mode = EDAC_FLAG_SECDED;
+dimm->edac_mode = EDAC_SECDED;
 dimm->nr_pages = pages_per_rank / csi->nr_channels;
 }
 }

View File

@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
 for (j = 0; j < csi->nr_channels; j++) {
 dimm = csi->channels[j]->dimm;
-dimm->edac_mode = EDAC_FLAG_SECDED;
+dimm->edac_mode = EDAC_SECDED;
 dimm->mtype = p_data->get_mtype(priv->baseaddr);
 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
 dimm->grain = SYNPS_EDAC_ERR_GRAIN;

View File

@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
 {
 unsigned int irq_base, nr_irqs;
 struct dfl_feature_info *finfo;
+u8 revision = 0;
 int ret;
-u8 revision;
 u64 v;
-v = readq(binfo->ioaddr + ofst);
-revision = FIELD_GET(DFH_REVISION, v);
+if (fid != FEATURE_ID_AFU) {
+v = readq(binfo->ioaddr + ofst);
+revision = FIELD_GET(DFH_REVISION, v);
 /* read feature size and id if inputs are invalid */
 size = size ? size : feature_size(v);
 fid = fid ? fid : feature_id(v);
+}
 if (binfo->len - ofst < size)
 return -EINVAL;

View File

@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
 goto fail;
 get_status(spi, &status);
-if (test_bit(FAIL, &status))
+if (test_bit(FAIL, &status)) {
+ret = -EINVAL;
 goto fail;
+}
 dump_status_reg(&status);
 spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
 dump_status_reg(&status);
 if (!test_bit(DONE, &status)) {
 machxo2_cleanup(mgr);
+ret = -EINVAL;
 goto fail;
 }
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
 break;
 if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
 machxo2_cleanup(mgr);
+ret = -EINVAL;
 goto fail;
 }
 } while (1);

View File

@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
 reg = ioread32(bank_reg(data, bank, reg_irq_status));
 for_each_set_bit(p, &reg, 32)
-generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
 }
 chained_irq_exit(ic, desc);

View File

@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 mutex_lock(&chip->i2c_lock);
 ret = regmap_read(chip->regmap, inreg, &reg_val);
 mutex_unlock(&chip->i2c_lock);
-if (ret < 0) {
-/*
-* NOTE:
-* diagnostic already emitted; that's all we should
-* do unless gpio_*_value_cansleep() calls become different
-* from their nonsleeping siblings (and report faults).
-*/
-return 0;
-}
+if (ret < 0)
+return ret;
 return !!(reg_val & bit);
 }

View File

@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
 u32 data;
 data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
-if (data & BIT(offset))
+if (data)
 return GPIO_LINE_DIRECTION_OUT;
 return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
 unsigned int cur_div_reg;
 u64 div;
-if (!IS_ERR(bank->db_clk)) {
+if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
 div_debounce_support = true;
 freq = clk_get_rate(bank->db_clk);
 max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 struct device_node *pctlnp = of_get_parent(np);
 struct pinctrl_dev *pctldev = NULL;
 struct rockchip_pin_bank *bank = NULL;
+struct rockchip_pin_output_deferred *cfg;
 static int gpio;
 int id, ret;
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 if (ret)
 return ret;
+/*
+* Prevent clashes with a deferred output setting
+* being added right at this moment.
+*/
+mutex_lock(&bank->deferred_lock);
 ret = rockchip_gpiolib_register(bank);
 if (ret) {
 clk_disable_unprepare(bank->clk);
+mutex_unlock(&bank->deferred_lock);
 return ret;
 }
+while (!list_empty(&bank->deferred_output)) {
+cfg = list_first_entry(&bank->deferred_output,
+struct rockchip_pin_output_deferred, head);
+list_del(&cfg->head);
+ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+if (ret)
+dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+kfree(cfg);
+}
+mutex_unlock(&bank->deferred_lock);
 platform_set_drvdata(pdev, bank);
 dev_info(dev, "probed %pOF\n", np);

View File

@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
 uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
-return irq_chip_mask_parent(data);
+irq_chip_mask_parent(data);
 }
 static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
 uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
-return irq_chip_unmask_parent(data);
+irq_chip_unmask_parent(data);
 }
 static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)

View File

@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
 if (ret)
-gpiochip_free_own_desc(desc);
+dev_warn(chip->parent,
+"Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+pin, ret);
-return ret ? ERR_PTR(ret) : desc;
+return desc;
 }
 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)

View File

@@ -971,7 +971,6 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 if (kfd->init_complete) {
-svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
 device_queue_manager_uninit(kfd->dqm);
 kfd_interrupt_exit(kfd);
 kfd_topology_remove_device(kfd);

Some files were not shown because too many files have changed in this diff.