commit ca8778c45e

Merge branch 'linus' into x86/cleanups, to resolve conflict

Conflicts:
	arch/x86/kernel/kprobes/ftrace.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -33,7 +33,7 @@ Contact:	xfs@oss.sgi.com
 Description:
 		The current state of the log write grant head. It
 		represents the total log reservation of all currently
-		oustanding transactions, including regrants due to
+		outstanding transactions, including regrants due to
 		rolling transactions. The grant head is exported in
 		"cycle:bytes" format.
 Users:		xfstests
@@ -21,6 +21,10 @@ properties:
       - fsl,vf610-spdif
       - fsl,imx6sx-spdif
       - fsl,imx8qm-spdif
+      - fsl,imx8qxp-spdif
+      - fsl,imx8mq-spdif
+      - fsl,imx8mm-spdif
+      - fsl,imx8mn-spdif
 
   reg:
     maxItems: 1
@@ -1495,7 +1495,8 @@ Fails if any VCPU has already been created.
 
 Define which vcpu is the Bootstrap Processor (BSP). Values are the same
 as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
-is vcpu 0.
+is vcpu 0. This ioctl has to be called before vcpu creation,
+otherwise it will return EBUSY error.
 
 
 4.42 KVM_GET_XSAVE
@@ -4806,8 +4807,10 @@ If an MSR access is not permitted through the filtering, it generates a
 allows user space to deflect and potentially handle various MSR accesses
 into user space.
 
-If a vCPU is in running state while this ioctl is invoked, the vCPU may
-experience inconsistent filtering behavior on MSR accesses.
+Note, invoking this ioctl with a vCPU is running is inherently racy. However,
+KVM does guarantee that vCPUs will see either the previous filter or the new
+filter, e.g. MSRs with identical settings in both the old and new filter will
+have deterministic behavior.
 
 4.127 KVM_XEN_HVM_SET_ATTR
 --------------------------
@@ -1181,7 +1181,7 @@ M:	Joel Fernandes <joel@joelfernandes.org>
 M:	Christian Brauner <christian@brauner.io>
 M:	Hridya Valsaraju <hridya@google.com>
 M:	Suren Baghdasaryan <surenb@google.com>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/android/
@@ -8116,7 +8116,6 @@ F:	drivers/crypto/hisilicon/sec2/sec_main.c
 
 HISILICON STAGING DRIVERS FOR HIKEY 960/970
 M:	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/hikey9xx/
 
@@ -17040,7 +17039,7 @@ F:	drivers/staging/vt665?/
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-staging@lists.linux.dev
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/staging/
@@ -19135,7 +19134,7 @@ VME SUBSYSTEM
 M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/driver-api/vme.rst
@@ -9,7 +9,7 @@ int arch_check_ftrace_location(struct kprobe *p)
 	return 0;
 }
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
@@ -176,7 +176,7 @@ SECTIONS
 	.fill : {
 		FILL(0);
 		BYTE(0);
-		. = ALIGN(8);
+		STRUCT_ALIGN();
 	}
 	__appended_dtb = .;
 	/* leave space for appended DTB */
@@ -7,7 +7,7 @@
 #include <linux/bug.h>
 #include <asm/cputable.h>
 
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
 {
 	return !!((CPU_FTRS_ALWAYS & feature) ||
 		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 	return static_branch_likely(&cpu_feature_keys[i]);
 }
 #else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	return early_cpu_has_feature(feature);
 }
@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
 V_FUNCTION_BEGIN(__kernel_time)
 	cvdso_call_time __c_kernel_time
 V_FUNCTION_END(__kernel_time)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+	lwz	r0,4(r11)
+	lwz	r31,-4(r11)
+	mtlr	r0
+	mr	r1,r11
+	blr
@@ -93,7 +93,6 @@ config RISCV
 	select PCI_MSI if PCI
 	select RISCV_INTC
 	select RISCV_TIMER if RISCV_SBI
-	select SPARSEMEM_STATIC if 32BIT
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
@@ -154,7 +153,8 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on MMU
-	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_STATIC if 32BIT && SPARSMEM
+	select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool ARCH_SPARSEMEM_ENABLE
@@ -31,6 +31,8 @@ config SOC_CANAAN
 	select SIFIVE_PLIC
 	select ARCH_HAS_RESET_CONTROLLER
 	select PINCTRL
+	select COMMON_CLK
+	select COMMON_CLK_K210
 	help
 	  This enables support for Canaan Kendryte K210 SoC platform hardware.
 
@@ -9,4 +9,20 @@ long long __lshrti3(long long a, int b);
 long long __ashrti3(long long a, int b);
 long long __ashlti3(long long a, int b);
 
+
+#define DECLARE_DO_ERROR_INFO(name)	asmlinkage void name(struct pt_regs *regs)
+
+DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
+DECLARE_DO_ERROR_INFO(do_trap_load_fault);
+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_fault);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+DECLARE_DO_ERROR_INFO(do_trap_break);
+
 #endif /* _ASM_RISCV_PROTOTYPES_H */
@@ -12,4 +12,6 @@
 
 #include <asm-generic/irq.h>
 
+extern void __init init_IRQ(void);
+
 #endif /* _ASM_RISCV_IRQ_H */
@@ -71,6 +71,7 @@ int riscv_of_processor_hartid(struct device_node *node);
 int riscv_of_parent_hartid(struct device_node *node);
 
 extern void riscv_fill_hwcap(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
 #endif /* __ASSEMBLY__ */
 
@@ -119,6 +119,11 @@ extern int regs_query_register_offset(const char *name);
 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 					       unsigned int n);
 
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer);
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_exit(struct pt_regs *regs);
+
 /**
  * regs_get_register() - get register value from its offset
  * @regs:	pt_regs from which register value is gotten
@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
 	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
-	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
 	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
-	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
 	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
 };
 
 enum sbi_ext_hsm_fid {
@@ -88,4 +88,6 @@ static inline int read_current_timer(unsigned long *timer_val)
 	return 0;
 }
 
+extern void time_init(void);
+
 #endif /* _ASM_RISCV_TIMEX_H */
@@ -8,6 +8,7 @@ CFLAGS_REMOVE_ftrace.o	= $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_patch.o	= $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_sbi.o	= $(CC_FLAGS_FTRACE)
 endif
+CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
 
 extra-y += head.o
 extra-y += vmlinux.lds
@@ -2,39 +2,41 @@
 
 #include <linux/kprobes.h>
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-			   struct ftrace_ops *ops, struct ftrace_regs *regs)
+			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
 	struct kprobe *p;
+	struct pt_regs *regs;
 	struct kprobe_ctlblk *kcb;
 
 	p = get_kprobe((kprobe_opcode_t *)ip);
 	if (unlikely(!p) || kprobe_disabled(p))
 		return;
 
+	regs = ftrace_get_regs(fregs);
 	kcb = get_kprobe_ctlblk();
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(p);
 	} else {
-		unsigned long orig_ip = instruction_pointer(&(regs->regs));
+		unsigned long orig_ip = instruction_pointer(regs);
 
-		instruction_pointer_set(&(regs->regs), ip);
+		instruction_pointer_set(regs, ip);
 
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) {
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			/*
 			 * Emulate singlestep (and also recover regs->pc)
 			 * as if there is a nop
 			 */
-			instruction_pointer_set(&(regs->regs),
+			instruction_pointer_set(regs,
 				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
 			if (unlikely(p->post_handler)) {
 				kcb->kprobe_status = KPROBE_HIT_SSDONE;
-				p->post_handler(p, &(regs->regs), 0);
+				p->post_handler(p, regs, 0);
 			}
-			instruction_pointer_set(&(regs->regs), orig_ip);
+			instruction_pointer_set(regs, orig_ip);
 		}
 
 	/*
@@ -256,8 +256,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
 		 * normal page fault.
 		 */
 		regs->epc = (unsigned long) cur->addr;
-		if (!instruction_pointer(regs))
-			BUG();
+		BUG_ON(!instruction_pointer(regs));
 
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
@@ -10,6 +10,7 @@
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/tick.h>
 #include <linux/ptrace.h>
@@ -116,7 +116,7 @@ void sbi_clear_ipi(void)
 EXPORT_SYMBOL(sbi_clear_ipi);
 
 /**
- * sbi_set_timer_v01() - Program the timer for next timer event.
+ * __sbi_set_timer_v01() - Program the timer for next timer event.
  * @stime_value: The value after which next timer event should fire.
  *
  * Return: None
@@ -147,7 +147,8 @@ static void __init init_resources(void)
 	bss_res.end = __pa_symbol(__bss_stop) - 1;
 	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
+	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res);
 	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
 	if (!mem_res)
 		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <asm/sbi.h>
 #include <asm/processor.h>
+#include <asm/timex.h>
 
 unsigned long riscv_timebase;
 EXPORT_SYMBOL_GPL(riscv_timebase);
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 
+#include <asm/asm-prototypes.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -155,7 +155,7 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate(void *start, void *end)
 {
 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
@@ -187,6 +187,8 @@ void __init kasan_shallow_populate(void *start, void *end)
 		}
 		vaddr += PAGE_SIZE;
 	}
+
+	local_flush_tlb_all();
 }
 
 void __init kasan_init(void)
@@ -202,7 +202,7 @@ extern unsigned int s390_pci_no_rid;
 ----------------------------------------------------------------------------- */
 /* Base stuff */
 int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
-void zpci_remove_device(struct zpci_dev *zdev);
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
@@ -968,7 +968,7 @@ static int cf_diag_all_start(void)
  */
 static size_t cf_diag_needspace(unsigned int sets)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
 	size_t bytes = 0;
 	int i;
 
@@ -984,6 +984,7 @@ static size_t cf_diag_needspace(unsigned int sets)
 		       sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
 	debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
 			    bytes);
+	put_cpu_ptr(&cpu_cf_events);
 	return bytes;
 }
 
@@ -214,7 +214,7 @@ void vtime_flush(struct task_struct *tsk)
 	avg_steal = S390_lowcore.avg_steal_timer / 2;
 	if ((s64) steal > 0) {
 		S390_lowcore.steal_timer = 0;
-		account_steal_time(steal);
+		account_steal_time(cputime_to_nsecs(steal));
 		avg_steal += steal;
 	}
 	S390_lowcore.avg_steal_timer = avg_steal;
@@ -682,16 +682,36 @@ int zpci_disable_device(struct zpci_dev *zdev)
 }
 EXPORT_SYMBOL_GPL(zpci_disable_device);
 
-void zpci_remove_device(struct zpci_dev *zdev)
+/* zpci_remove_device - Removes the given zdev from the PCI core
+ * @zdev: the zdev to be removed from the PCI core
+ * @set_error: if true the device's error state is set to permanent failure
+ *
+ * Sets a zPCI device to a configured but offline state; the zPCI
+ * device is still accessible through its hotplug slot and the zPCI
+ * API but is removed from the common code PCI bus, making it
+ * no longer available to drivers.
+ */
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
 {
 	struct zpci_bus *zbus = zdev->zbus;
 	struct pci_dev *pdev;
 
+	if (!zdev->zbus->bus)
+		return;
+
 	pdev = pci_get_slot(zbus->bus, zdev->devfn);
 	if (pdev) {
-		if (pdev->is_virtfn)
-			return zpci_iov_remove_virtfn(pdev, zdev->vfn);
+		if (set_error)
+			pdev->error_state = pci_channel_io_perm_failure;
+		if (pdev->is_virtfn) {
+			zpci_iov_remove_virtfn(pdev, zdev->vfn);
+			/* balance pci_get_slot */
+			pci_dev_put(pdev);
+			return;
+		}
 		pci_stop_and_remove_bus_device_locked(pdev);
+		/* balance pci_get_slot */
+		pci_dev_put(pdev);
 	}
 }
 
@@ -765,7 +785,7 @@ void zpci_release_device(struct kref *kref)
 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
 
 	if (zdev->zbus->bus)
-		zpci_remove_device(zdev);
+		zpci_remove_device(zdev, false);
 
 	switch (zdev->state) {
 	case ZPCI_FN_STATE_ONLINE:
@@ -76,13 +76,10 @@ void zpci_event_error(void *data)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-	struct pci_dev *pdev = NULL;
 	enum zpci_state state;
+	struct pci_dev *pdev;
 	int ret;
 
-	if (zdev && zdev->zbus->bus)
-		pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
-
 	zpci_err("avail CCDF:\n");
 	zpci_err_hex(ccdf, sizeof(*ccdf));
 
@@ -124,8 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	case 0x0303: /* Deconfiguration requested */
 		if (!zdev)
 			break;
-		if (pdev)
-			zpci_remove_device(zdev);
+		zpci_remove_device(zdev, false);
 
 		ret = zpci_disable_device(zdev);
 		if (ret)
@@ -140,12 +136,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	case 0x0304: /* Configured -> Standby|Reserved */
 		if (!zdev)
 			break;
-		if (pdev) {
 		/* Give the driver a hint that the function is
-		 * already unusable. */
-			pdev->error_state = pci_channel_io_perm_failure;
-			zpci_remove_device(zdev);
-		}
+		 * already unusable.
+		 */
+		zpci_remove_device(zdev, true);
 
 		zdev->fh = ccdf->fh;
 		zpci_disable_device(zdev);
@@ -3659,6 +3659,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		return ret;
 
 	if (event->attr.precise_ip) {
+		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
+			return -EINVAL;
+
 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 			if (!(event->attr.sample_type &
@@ -2010,7 +2010,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
 		 */
 		if (!pebs_status && cpuc->pebs_enabled &&
 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
-			pebs_status = cpuc->pebs_enabled;
+			pebs_status = p->status = cpuc->pebs_enabled;
 
 		bit = find_first_bit((unsigned long *)&pebs_status,
 				     x86_pmu.max_pebs_events);
@@ -884,12 +884,29 @@ struct kvm_hv_syndbg {
 	u64 options;
 };
 
+/* Current state of Hyper-V TSC page clocksource */
+enum hv_tsc_page_status {
+	/* TSC page was not set up or disabled */
+	HV_TSC_PAGE_UNSET = 0,
+	/* TSC page MSR was written by the guest, update pending */
+	HV_TSC_PAGE_GUEST_CHANGED,
+	/* TSC page MSR was written by KVM userspace, update pending */
+	HV_TSC_PAGE_HOST_CHANGED,
+	/* TSC page was properly set up and is currently active */
+	HV_TSC_PAGE_SET,
+	/* TSC page is currently being updated and therefore is inactive */
+	HV_TSC_PAGE_UPDATING,
+	/* TSC page was set up with an inaccessible GPA */
+	HV_TSC_PAGE_BROKEN,
+};
+
 /* Hyper-V emulation context */
 struct kvm_hv {
 	struct mutex hv_lock;
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
 	u64 hv_tsc_page;
+	enum hv_tsc_page_status hv_tsc_page_status;
 
 	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
 	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
@@ -931,6 +948,12 @@ enum kvm_irqchip_mode {
 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+struct kvm_x86_msr_filter {
+	u8 count;
+	bool default_allow:1;
+	struct msr_bitmap_range ranges[16];
+};
+
 #define APICV_INHIBIT_REASON_DISABLE    0
 #define APICV_INHIBIT_REASON_HYPERV     1
 #define APICV_INHIBIT_REASON_NESTED     2
@@ -1025,16 +1048,11 @@ struct kvm_arch {
 	bool guest_can_read_msr_platform_info;
 	bool exception_payload_enabled;
 
+	bool bus_lock_detection_enabled;
+
 	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
 	u32 user_space_msr_mask;
-	struct {
-		u8 count;
-		bool default_allow:1;
-		struct msr_bitmap_range ranges[16];
-	} msr_filter;
-
-	bool bus_lock_detection_enabled;
+	struct kvm_x86_msr_filter __rcu *msr_filter;
 
 	struct kvm_pmu_event_filter __rcu *pmu_event_filter;
 	struct task_struct *nx_lpage_recovery_thread;
@@ -551,15 +551,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 	*size = fpu_kernel_xstate_size;
 }
 
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
-
 static inline void
 native_load_sp0(unsigned long sp0)
 {
@@ -205,10 +205,23 @@ static inline int arch_within_stack_frames(const void * const stack,
 
 #endif
 
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
+
+#ifndef __ASSEMBLY__
 #ifdef CONFIG_COMPAT
 #define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */
+
+#define arch_set_restart_data(restart)	\
+	do { restart->arch_data = current_thread_info()->status; } while (0)
+
 #endif
-#ifndef __ASSEMBLY__
 
 #ifdef CONFIG_X86_32
 #define in_ia32_syscall() true
@@ -2342,6 +2342,11 @@ static int cpuid_to_apicid[] = {
 	[0 ... NR_CPUS - 1] = -1,
 };
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return phys_id == cpuid_to_apicid[cpu];
+}
+
 #ifdef CONFIG_SMP
 /**
  * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
@@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
 	if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
 		irq = mp_irqs[idx].srcbusirq;
 		legacy = mp_is_legacy_irq(irq);
+		/*
+		 * IRQ2 is unusable for historical reasons on systems which
+		 * have a legacy PIC. See the comment vs. IRQ2 further down.
+		 *
+		 * If this gets removed at some point then the related code
+		 * in lapic_assign_system_vectors() needs to be adjusted as
+		 * well.
+		 */
+		if (legacy && irq == PIC_CASCADE_IR)
+			return -EINVAL;
 	}
 
 	mutex_lock(&ioapic_mutex);
@@ -836,28 +836,25 @@ static void kvm_kick_cpu(int cpu)
 
 static void kvm_wait(u8 *ptr, u8 val)
 {
-	unsigned long flags;
-
 	if (in_nmi())
 		return;
 
-	local_irq_save(flags);
-
-	if (READ_ONCE(*ptr) != val)
-		goto out;
-
 	/*
 	 * halt until it's our turn and kicked. Note that we do safe halt
 	 * for irq enabled case to avoid hang when lock info is overwritten
 	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
 	 */
-	if (arch_irqs_disabled_flags(flags))
-		halt();
-	else
-		safe_halt();
+	if (irqs_disabled()) {
+		if (READ_ONCE(*ptr) == val)
+			halt();
+	} else {
+		local_irq_disable();
+
+		if (READ_ONCE(*ptr) == val)
+			safe_halt();
 
-out:
-	local_irq_restore(flags);
+		local_irq_enable();
+	}
 }
 
 #ifdef CONFIG_X86_32
@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
-	/*
-	 * This function is fundamentally broken as currently
-	 * implemented.
-	 *
-	 * The idea is that we want to trigger a call to the
-	 * restart_block() syscall and that we want in_ia32_syscall(),
-	 * in_x32_syscall(), etc. to match whatever they were in the
-	 * syscall being restarted. We assume that the syscall
-	 * instruction at (regs->ip - 2) matches whatever syscall
-	 * instruction we used to enter in the first place.
-	 *
-	 * The problem is that we can get here when ptrace pokes
-	 * syscall-like values into regs even if we're not in a syscall
-	 * at all.
-	 *
-	 * For now, we maintain historical behavior and guess based on
-	 * stored state. We could do better by saving the actual
-	 * syscall arch in restart_block or (with caveats on x32) by
-	 * checking if regs->ip points to 'int $0x80'. The current
-	 * behavior is incorrect if a tracer has a different bitness
-	 * than the tracee.
-	 */
 #ifdef CONFIG_IA32_EMULATION
-	if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+	if (current->restart_block.arch_data & TS_COMPAT)
 		return __NR_ia32_restart_syscall;
 #endif
 #ifdef CONFIG_X86_X32_ABI
@@ -520,10 +520,10 @@ static u64 get_time_ref_counter(struct kvm *kvm)
 	u64 tsc;
 
 	/*
-	 * The guest has not set up the TSC page or the clock isn't
-	 * stable, fall back to get_kvmclock_ns.
+	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
+	 * is broken, disabled or being updated.
 	 */
-	if (!hv->tsc_ref.tsc_sequence)
+	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
 		return div_u64(get_kvmclock_ns(kvm), 100);
 
 	vcpu = kvm_get_vcpu(kvm, 0);
@@ -1077,6 +1077,21 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
 	return true;
 }
 
+/*
+ * Don't touch TSC page values if the guest has opted for TSC emulation after
+ * migration. KVM doesn't fully support reenlightenment notifications and TSC
+ * access emulation and Hyper-V is known to expect the values in TSC page to
+ * stay constant before TSC access emulation is disabled from guest side
+ * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
+ * frequency and guest visible TSC value across migration (and prevent it when
+ * TSC scaling is unsupported).
+ */
+static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
+{
+	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
+		hv->hv_tsc_emulation_control;
+}
+
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
 			   struct pvclock_vcpu_time_info *hv_clock)
 {
@@ -1087,7 +1102,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
 	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
 		return;
 
 	mutex_lock(&hv->hv_lock);
@@ -1101,7 +1117,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	 */
 	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
 				    &tsc_seq, sizeof(tsc_seq))))
+		goto out_err;
+
+	if (tsc_seq && tsc_page_update_unsafe(hv)) {
+		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
+			goto out_err;
+
+		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
 		goto out_unlock;
+	}
 
 	/*
 	 * While we're computing and writing the parameters, force the
@@ -1110,15 +1134,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	hv->tsc_ref.tsc_sequence = 0;
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-		goto out_unlock;
+		goto out_err;
 
 	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-		goto out_unlock;
+		goto out_err;
 
 	/* Ensure sequence is zero before writing the rest of the struct. */
 	smp_wmb();
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-		goto out_unlock;
+		goto out_err;
 
 	/*
 	 * Now switch to the TSC page mechanism by writing the sequence.
@@ -1131,8 +1155,45 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	smp_wmb();
 
 	hv->tsc_ref.tsc_sequence = tsc_seq;
-	kvm_write_guest(kvm, gfn_to_gpa(gfn),
-			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		goto out_err;
+
+	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+	goto out_unlock;
+
+out_err:
+	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+out_unlock:
+	mutex_unlock(&hv->hv_lock);
+}
+
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+{
+	struct kvm_hv *hv = to_kvm_hv(kvm);
+	u64 gfn;
+
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+	    tsc_page_update_unsafe(hv))
+		return;
+
+	mutex_lock(&hv->hv_lock);
+
+	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+		goto out_unlock;
+
+	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
+	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+
+	hv->tsc_ref.tsc_sequence = 0;
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+
 out_unlock:
 	mutex_unlock(&hv->hv_lock);
 }
@@ -1193,8 +1254,15 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		}
 	case HV_X64_MSR_REFERENCE_TSC:
 		hv->hv_tsc_page = data;
-		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+			if (!host)
+				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+			else
+				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+		} else {
+			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+		}
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
 		return kvm_hv_msr_set_crash_data(kvm,
@@ -1229,6 +1297,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		hv->hv_tsc_emulation_control = data;
 		break;
 	case HV_X64_MSR_TSC_EMULATION_STATUS:
+		if (data && !host)
+			return 1;
+
 		hv->hv_tsc_emulation_status = data;
 		break;
 	case HV_X64_MSR_TIME_REF_COUNT:
@@ -133,6 +133,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
 
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
 			   struct pvclock_vcpu_time_info *hv_clock);
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
@ -78,6 +78,11 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
|
|||||||
return to_shadow_page(__pa(sptep));
|
return to_shadow_page(__pa(sptep));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
|
||||||
|
{
|
||||||
|
return sp->role.smm ? 1 : 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
|
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
|
@ -20,6 +20,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
|
|||||||
return gfn & -KVM_PAGES_PER_HPAGE(level);
|
return gfn & -KVM_PAGES_PER_HPAGE(level);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Return the TDP iterator to the root PT and allow it to continue its
|
||||||
|
* traversal over the paging structure from there.
|
||||||
|
*/
|
||||||
|
void tdp_iter_restart(struct tdp_iter *iter)
|
||||||
|
{
|
||||||
|
iter->yielded_gfn = iter->next_last_level_gfn;
|
||||||
|
iter->level = iter->root_level;
|
||||||
|
|
||||||
|
iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
|
||||||
|
tdp_iter_refresh_sptep(iter);
|
||||||
|
|
||||||
|
iter->valid = true;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Sets a TDP iterator to walk a pre-order traversal of the paging structure
|
* Sets a TDP iterator to walk a pre-order traversal of the paging structure
|
||||||
* rooted at root_pt, starting with the walk to translate next_last_level_gfn.
|
* rooted at root_pt, starting with the walk to translate next_last_level_gfn.
|
||||||
@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
|
|||||||
WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
|
WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
|
||||||
|
|
||||||
iter->next_last_level_gfn = next_last_level_gfn;
|
iter->next_last_level_gfn = next_last_level_gfn;
|
||||||
iter->yielded_gfn = iter->next_last_level_gfn;
|
|
||||||
iter->root_level = root_level;
|
iter->root_level = root_level;
|
||||||
iter->min_level = min_level;
|
iter->min_level = min_level;
|
||||||
iter->level = root_level;
|
iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
|
||||||
iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
|
iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));
|
||||||
|
|
||||||
iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
|
tdp_iter_restart(iter);
|
||||||
tdp_iter_refresh_sptep(iter);
|
|
||||||
|
|
||||||
iter->valid = true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter)
|
|||||||
iter->valid = false;
|
iter->valid = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
|
|
||||||
{
|
|
||||||
return iter->pt_path[iter->root_level - 1];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
@ -36,6 +36,8 @@ struct tdp_iter {
|
|||||||
int min_level;
|
int min_level;
|
||||||
/* The iterator's current level within the paging structure */
|
/* The iterator's current level within the paging structure */
|
||||||
int level;
|
int level;
|
||||||
|
/* The address space ID, i.e. SMM vs. regular. */
|
||||||
|
int as_id;
|
||||||
/* A snapshot of the value at sptep */
|
/* A snapshot of the value at sptep */
|
||||||
u64 old_spte;
|
u64 old_spte;
|
||||||
/*
|
/*
|
||||||
@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level);
|
|||||||
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
|
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
|
||||||
int min_level, gfn_t next_last_level_gfn);
|
int min_level, gfn_t next_last_level_gfn);
|
||||||
void tdp_iter_next(struct tdp_iter *iter);
|
void tdp_iter_next(struct tdp_iter *iter);
|
||||||
tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
|
void tdp_iter_restart(struct tdp_iter *iter);
|
||||||
|
|
||||||
#endif /* __KVM_X86_MMU_TDP_ITER_H */
|
#endif /* __KVM_X86_MMU_TDP_ITER_H */
|
||||||
|
@ -203,11 +203,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
|
|||||||
u64 old_spte, u64 new_spte, int level,
|
u64 old_spte, u64 new_spte, int level,
|
||||||
bool shared);
|
bool shared);
|
||||||
|
|
||||||
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
|
|
||||||
{
|
|
||||||
return sp->role.smm ? 1 : 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
|
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
|
||||||
{
|
{
|
||||||
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
|
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
|
||||||
@ -301,11 +296,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
|
|||||||
*
|
*
|
||||||
* Given a page table that has been removed from the TDP paging structure,
|
* Given a page table that has been removed from the TDP paging structure,
|
||||||
* iterates through the page table to clear SPTEs and free child page tables.
|
* iterates through the page table to clear SPTEs and free child page tables.
|
||||||
|
*
|
||||||
|
* Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
|
||||||
|
* protection. Since this thread removed it from the paging structure,
|
||||||
|
* this thread will be responsible for ensuring the page is freed. Hence the
|
||||||
|
* early rcu_dereferences in the function.
|
||||||
*/
|
*/
|
||||||
static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
|
static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
|
||||||
bool shared)
|
bool shared)
|
||||||
{
|
{
|
||||||
struct kvm_mmu_page *sp = sptep_to_sp(pt);
|
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
|
||||||
int level = sp->role.level;
|
int level = sp->role.level;
|
||||||
gfn_t base_gfn = sp->gfn;
|
gfn_t base_gfn = sp->gfn;
|
||||||
u64 old_child_spte;
|
u64 old_child_spte;
|
||||||
@ -318,7 +318,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
|
|||||||
tdp_mmu_unlink_page(kvm, sp, shared);
|
tdp_mmu_unlink_page(kvm, sp, shared);
|
||||||
|
|
||||||
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
|
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
|
||||||
sptep = pt + i;
|
sptep = rcu_dereference(pt) + i;
|
||||||
gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
|
gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
|
||||||
|
|
||||||
if (shared) {
|
if (shared) {
|
||||||
@ -492,10 +492,6 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
|
|||||||
struct tdp_iter *iter,
|
struct tdp_iter *iter,
|
||||||
u64 new_spte)
|
u64 new_spte)
|
||||||
{
|
{
|
||||||
u64 *root_pt = tdp_iter_root_pt(iter);
|
|
||||||
struct kvm_mmu_page *root = sptep_to_sp(root_pt);
|
|
||||||
int as_id = kvm_mmu_page_as_id(root);
|
|
||||||
|
|
||||||
lockdep_assert_held_read(&kvm->mmu_lock);
|
lockdep_assert_held_read(&kvm->mmu_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -509,8 +505,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
|
|||||||
new_spte) != iter->old_spte)
|
new_spte) != iter->old_spte)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
|
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
|
||||||
iter->level, true);
|
new_spte, iter->level, true);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@ -538,7 +534,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
|
|||||||
* here since the SPTE is going from non-present
|
* here since the SPTE is going from non-present
|
||||||
* to non-present.
|
* to non-present.
|
||||||
*/
|
*/
|
||||||
WRITE_ONCE(*iter->sptep, 0);
|
WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@ -564,10 +560,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
|
|||||||
u64 new_spte, bool record_acc_track,
|
u64 new_spte, bool record_acc_track,
|
||||||
bool record_dirty_log)
|
bool record_dirty_log)
|
||||||
{
|
{
|
||||||
tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
|
|
||||||
struct kvm_mmu_page *root = sptep_to_sp(root_pt);
|
|
||||||
int as_id = kvm_mmu_page_as_id(root);
|
|
||||||
|
|
||||||
lockdep_assert_held_write(&kvm->mmu_lock);
|
lockdep_assert_held_write(&kvm->mmu_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -581,13 +573,13 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
|
|||||||
|
|
||||||
WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
|
WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
|
||||||
|
|
||||||
__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
|
__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
|
||||||
iter->level, false);
|
new_spte, iter->level, false);
|
||||||
if (record_acc_track)
|
if (record_acc_track)
|
||||||
handle_changed_spte_acc_track(iter->old_spte, new_spte,
|
handle_changed_spte_acc_track(iter->old_spte, new_spte,
|
||||||
iter->level);
|
iter->level);
|
||||||
if (record_dirty_log)
|
if (record_dirty_log)
|
||||||
handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
|
handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
|
||||||
iter->old_spte, new_spte,
|
iter->old_spte, new_spte,
|
||||||
iter->level);
|
iter->level);
|
||||||
}
|
}
|
||||||
@ -659,9 +651,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
|
|||||||
|
|
||||||
WARN_ON(iter->gfn > iter->next_last_level_gfn);
|
WARN_ON(iter->gfn > iter->next_last_level_gfn);
|
||||||
|
|
||||||
tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
|
tdp_iter_restart(iter);
|
||||||
iter->root_level, iter->min_level,
|
|
||||||
iter->next_last_level_gfn);
|
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -1526,35 +1526,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 {
+struct kvm_x86_msr_filter *msr_filter;
+struct msr_bitmap_range *ranges;
 struct kvm *kvm = vcpu->kvm;
-struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
-u32 count = kvm->arch.msr_filter.count;
-u32 i;
-bool r = kvm->arch.msr_filter.default_allow;
+bool allowed;
 int idx;
+u32 i;
 
-/* MSR filtering not set up or x2APIC enabled, allow everything */
-if (!count || (index >= 0x800 && index <= 0x8ff))
+/* x2APIC MSRs do not support filtering. */
+if (index >= 0x800 && index <= 0x8ff)
 return true;
 
-/* Prevent collision with set_msr_filter */
 idx = srcu_read_lock(&kvm->srcu);
 
-for (i = 0; i < count; i++) {
+msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
+if (!msr_filter) {
+allowed = true;
+goto out;
+}
+
+allowed = msr_filter->default_allow;
+ranges = msr_filter->ranges;
+
+for (i = 0; i < msr_filter->count; i++) {
 u32 start = ranges[i].base;
 u32 end = start + ranges[i].nmsrs;
 u32 flags = ranges[i].flags;
 unsigned long *bitmap = ranges[i].bitmap;
 
 if ((index >= start) && (index < end) && (flags & type)) {
-r = !!test_bit(index - start, bitmap);
+allowed = !!test_bit(index - start, bitmap);
 break;
 }
 }
 
+out:
 srcu_read_unlock(&kvm->srcu, idx);
 
-return r;
+return allowed;
 }
 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
 
@ -2551,6 +2560,8 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 struct kvm_vcpu *vcpu;
 struct kvm_arch *ka = &kvm->arch;
 
+kvm_hv_invalidate_tsc_page(kvm);
+
 spin_lock(&ka->pvclock_gtod_sync_lock);
 kvm_make_mclock_inprogress_request(kvm);
 /* no guest entries from this point */
@ -5352,25 +5363,34 @@ split_irqchip_unlock:
 return r;
 }
 
-static void kvm_clear_msr_filter(struct kvm *kvm)
+static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
 {
-u32 i;
-u32 count = kvm->arch.msr_filter.count;
-struct msr_bitmap_range ranges[16];
+struct kvm_x86_msr_filter *msr_filter;
 
-mutex_lock(&kvm->lock);
-kvm->arch.msr_filter.count = 0;
-memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
-mutex_unlock(&kvm->lock);
-synchronize_srcu(&kvm->srcu);
+msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
+if (!msr_filter)
+return NULL;
 
-for (i = 0; i < count; i++)
-kfree(ranges[i].bitmap);
+msr_filter->default_allow = default_allow;
+return msr_filter;
 }
 
-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
+static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
+{
+u32 i;
+
+if (!msr_filter)
+return;
+
+for (i = 0; i < msr_filter->count; i++)
+kfree(msr_filter->ranges[i].bitmap);
+
+kfree(msr_filter);
+}
+
+static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
+struct kvm_msr_filter_range *user_range)
 {
-struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
 struct msr_bitmap_range range;
 unsigned long *bitmap = NULL;
 size_t bitmap_size;
@ -5404,11 +5424,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
 goto err;
 }
 
-/* Everything ok, add this range identifier to our global pool */
-ranges[kvm->arch.msr_filter.count] = range;
-/* Make sure we filled the array before we tell anyone to walk it */
-smp_wmb();
-kvm->arch.msr_filter.count++;
+/* Everything ok, add this range identifier. */
+msr_filter->ranges[msr_filter->count] = range;
+msr_filter->count++;
 
 return 0;
 err:
@ -5419,10 +5437,11 @@ err:
 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 {
 struct kvm_msr_filter __user *user_msr_filter = argp;
+struct kvm_x86_msr_filter *new_filter, *old_filter;
 struct kvm_msr_filter filter;
 bool default_allow;
-int r = 0;
 bool empty = true;
+int r = 0;
 u32 i;
 
 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
@ -5435,25 +5454,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 if (empty && !default_allow)
 return -EINVAL;
 
-kvm_clear_msr_filter(kvm);
-
-kvm->arch.msr_filter.default_allow = default_allow;
+new_filter = kvm_alloc_msr_filter(default_allow);
+if (!new_filter)
+return -ENOMEM;
 
-/*
- * Protect from concurrent calls to this function that could trigger
- * a TOCTOU violation on kvm->arch.msr_filter.count.
- */
-mutex_lock(&kvm->lock);
 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
-if (r)
-break;
+r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+if (r) {
+kvm_free_msr_filter(new_filter);
+return r;
+}
 }
 
+mutex_lock(&kvm->lock);
+
+/* The per-VM filter is protected by kvm->lock... */
+old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
+
+rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
+synchronize_srcu(&kvm->srcu);
+
+kvm_free_msr_filter(old_filter);
+
 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
 mutex_unlock(&kvm->lock);
 
-return r;
+return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
@ -6603,7 +6629,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 int cpu = get_cpu();
 
 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
+on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
 wbinvd_ipi, NULL, 1);
 put_cpu();
 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
@ -10634,8 +10660,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-u32 i;
-
 if (current->mm == kvm->mm) {
 /*
 * Free memory regions allocated on behalf of userspace,
@ -10651,8 +10675,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 mutex_unlock(&kvm->slots_lock);
 }
 static_call_cond(kvm_x86_vm_destroy)(kvm);
-for (i = 0; i < kvm->arch.msr_filter.count; i++)
-kfree(kvm->arch.msr_filter.ranges[i].bitmap);
+kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 kvm_pic_destroy(kvm);
 kvm_ioapic_destroy(kvm);
 kvm_free_vcpus(kvm);
@ -27,7 +27,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");
 MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
-MODULE_SUPPORTED_DEVICE("Eurobraille/Iris");
 
 static bool force;
 
@ -100,8 +100,6 @@ static LIST_HEAD(fore200e_boards);
 
 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
-MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
-
 
 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
 { BUFFER_S1_NBR, BUFFER_L1_NBR },
@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 __releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-bool use_links = dev->power.links_count > 0;
-bool get = false;
 int retval, idx;
-bool put;
+bool use_links = dev->power.links_count > 0;
 
 if (dev->power.irq_safe) {
 spin_unlock(&dev->power.lock);
-} else if (!use_links) {
-spin_unlock_irq(&dev->power.lock);
 } else {
-get = dev->power.runtime_status == RPM_RESUMING;
-
 spin_unlock_irq(&dev->power.lock);
 
-/* Resume suppliers if necessary. */
-if (get) {
+/*
+* Resume suppliers if necessary.
+*
+* The device's runtime PM status cannot change until this
+* routine returns, so it is safe to read the status outside of
+* the lock.
+*/
+if (use_links && dev->power.runtime_status == RPM_RESUMING) {
 idx = device_links_read_lock();
 
 retval = rpm_get_suppliers(dev);
@ -355,36 +355,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 
 if (dev->power.irq_safe) {
 spin_lock(&dev->power.lock);
-return retval;
-}
-
-spin_lock_irq(&dev->power.lock);
-
-if (!use_links)
-return retval;
-
-/*
-* If the device is suspending and the callback has returned success,
-* drop the usage counters of the suppliers that have been reference
-* counted on its resume.
-*
-* Do that if the resume fails too.
-*/
-put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
-if (put)
-__update_runtime_status(dev, RPM_SUSPENDED);
-else
-put = get && retval;
-
-if (put) {
-spin_unlock_irq(&dev->power.lock);
-
+} else {
+/*
+* If the device is suspending and the callback has returned
+* success, drop the usage counters of the suppliers that have
+* been reference counted on its resume.
+*
+* Do that if resume fails too.
+*/
+if (use_links
+&& ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
 idx = device_links_read_lock();
 
 fail:
 rpm_put_suppliers(dev);
 
 device_links_read_unlock(idx);
+}
 
 spin_lock_irq(&dev->power.lock);
 }
@ -5091,7 +5091,6 @@ module_param(floppy, charp, 0);
 module_param(FLOPPY_IRQ, int, 0);
 module_param(FLOPPY_DMA, int, 0);
 MODULE_AUTHOR("Alain L. Knaff");
-MODULE_SUPPORTED_DEVICE("fd");
 MODULE_LICENSE("GPL");
 
 /* This doesn't actually get used other than for module information */
@ -194,5 +194,4 @@ module_init(rsi_91x_bt_module_init);
 module_exit(rsi_91x_bt_module_exit);
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("RSI BT driver");
-MODULE_SUPPORTED_DEVICE("RSI-BT");
 MODULE_LICENSE("Dual BSD/GPL");
@ -81,9 +81,6 @@ MODULE_DESCRIPTION("Driver for Applicom Profibus card");
|
|||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
MODULE_ALIAS_MISCDEV(AC_MINOR);
|
MODULE_ALIAS_MISCDEV(AC_MINOR);
|
||||||
|
|
||||||
MODULE_SUPPORTED_DEVICE("ac");
|
|
||||||
|
|
||||||
|
|
||||||
static struct applicom_board {
|
static struct applicom_board {
|
||||||
unsigned long PhysIO;
|
unsigned long PhysIO;
|
||||||
void __iomem *RamIO;
|
void __iomem *RamIO;
|
||||||
|
@ -64,7 +64,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
 MODULE_DESCRIPTION("Toshiba laptop SMM driver");
-MODULE_SUPPORTED_DEVICE("toshiba");
 
 static DEFINE_MUTEX(tosh_mutex);
 static int tosh_fn;
@ -31,7 +31,7 @@ struct stm32_timer_cnt {
 struct counter_device counter;
 struct regmap *regmap;
 struct clk *clk;
-u32 ceiling;
+u32 max_arr;
 bool enabled;
 struct stm32_timer_regs bak;
 };
@ -44,13 +44,14 @@ struct stm32_timer_cnt {
 * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
 */
 enum stm32_count_function {
-STM32_COUNT_SLAVE_MODE_DISABLED = -1,
+STM32_COUNT_SLAVE_MODE_DISABLED,
 STM32_COUNT_ENCODER_MODE_1,
 STM32_COUNT_ENCODER_MODE_2,
 STM32_COUNT_ENCODER_MODE_3,
 };
 
 static enum counter_count_function stm32_count_functions[] = {
+[STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
 [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
 [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
 [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter,
 const unsigned long val)
 {
 struct stm32_timer_cnt *const priv = counter->priv;
+u32 ceiling;
 
-if (val > priv->ceiling)
+regmap_read(priv->regmap, TIM_ARR, &ceiling);
+if (val > ceiling)
 return -EINVAL;
 
 return regmap_write(priv->regmap, TIM_CNT, val);
@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter,
 regmap_read(priv->regmap, TIM_SMCR, &smcr);
 
 switch (smcr & TIM_SMCR_SMS) {
+case 0:
+*function = STM32_COUNT_SLAVE_MODE_DISABLED;
+return 0;
 case 1:
 *function = STM32_COUNT_ENCODER_MODE_1;
 return 0;
@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter,
 case 3:
 *function = STM32_COUNT_ENCODER_MODE_3;
 return 0;
-}
-
-return -EINVAL;
+default:
+return -EINVAL;
+}
 }
 
 static int stm32_count_function_set(struct counter_device *counter,
@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter,
 u32 cr1, sms;
 
 switch (function) {
+case STM32_COUNT_SLAVE_MODE_DISABLED:
+sms = 0;
+break;
 case STM32_COUNT_ENCODER_MODE_1:
 sms = 1;
 break;
@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter,
 sms = 3;
 break;
 default:
-sms = 0;
-break;
+return -EINVAL;
 }
 
 /* Store enable status */
@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter,
 
 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
 
-/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
-regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
-regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
-
 regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
 
 /* Make sure that registers are updated */
@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
 if (ret)
 return ret;
 
+if (ceiling > priv->max_arr)
+return -ERANGE;
+
 /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
 regmap_write(priv->regmap, TIM_ARR, ceiling);
 
-priv->ceiling = ceiling;
 return len;
 }
 
@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter,
 size_t function;
 int err;
 
-/* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
-*action = STM32_SYNAPSE_ACTION_NONE;
-
 err = stm32_count_function_get(counter, count, &function);
 if (err)
-return 0;
+return err;
 
 switch (function) {
+case STM32_COUNT_SLAVE_MODE_DISABLED:
+/* counts on internal clock when CEN=1 */
+*action = STM32_SYNAPSE_ACTION_NONE;
+return 0;
 case STM32_COUNT_ENCODER_MODE_1:
 /* counts up/down on TI1FP1 edge depending on TI2FP2 level */
 if (synapse->signal->id == count->synapses[0].signal->id)
 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-break;
+else
+*action = STM32_SYNAPSE_ACTION_NONE;
+return 0;
 case STM32_COUNT_ENCODER_MODE_2:
 /* counts up/down on TI2FP2 edge depending on TI1FP1 level */
 if (synapse->signal->id == count->synapses[1].signal->id)
 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-break;
+else
+*action = STM32_SYNAPSE_ACTION_NONE;
+return 0;
 case STM32_COUNT_ENCODER_MODE_3:
 /* counts up/down on both TI1FP1 and TI2FP2 edges */
 *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-break;
-}
-
-return 0;
+return 0;
+default:
+return -EINVAL;
+}
 }
 
 static const struct counter_ops stm32_timer_cnt_ops = {
@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
 
 priv->regmap = ddata->regmap;
 priv->clk = ddata->clk;
-priv->ceiling = ddata->max_arr;
+priv->max_arr = ddata->max_arr;
 
 priv->counter.name = dev_name(dev);
 priv->counter.parent = dev;
@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 }
 
 /* first try to find a slot in an existing linked list entry */
-for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+for (prsv = efi_memreserve_root->next; prsv; ) {
 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
 if (index < rsv->size) {
@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 memunmap(rsv);
 return efi_mem_reserve_iomem(addr, size);
 }
+prsv = rsv->next;
 memunmap(rsv);
 }
 
@ -484,6 +484,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 }
 }
 
+break;
+case EFI_UNSUPPORTED:
+err = -EOPNOTSUPP;
+status = EFI_NOT_FOUND;
 break;
 case EFI_NOT_FOUND:
 break;
@ -571,6 +571,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 struct lock_class_key *lock_key,
 struct lock_class_key *request_key)
 {
+struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
 unsigned long flags;
 int ret = 0;
 unsigned i;
@ -594,6 +595,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
 of_gpio_dev_init(gc, gdev);
 
+/*
+* Assign fwnode depending on the result of the previous calls,
+* if none of them succeed, assign it to the parent's one.
+*/
+gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
+
 gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
 if (gdev->id < 0) {
 ret = gdev->id;
@ -4256,7 +4263,8 @@ static int __init gpiolib_dev_init(void)
 return ret;
 }
 
-if (driver_register(&gpio_stub_drv) < 0) {
+ret = driver_register(&gpio_stub_drv);
+if (ret < 0) {
 pr_err("gpiolib: could not register GPIO stub driver\n");
 bus_unregister(&gpio_bus_type);
 return ret;
@ -1507,36 +1507,6 @@ static void dcn20_update_dchubp_dpp(
 if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
 || pipe_ctx->stream->update_flags.bits.gamut_remap
 || pipe_ctx->stream->update_flags.bits.out_csc) {
-struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
-
-if (mpc->funcs->set_gamut_remap) {
-int i;
-int mpcc_id = hubp->inst;
-struct mpc_grph_gamut_adjustment adjust;
-bool enable_remap_dpp = false;
-
-memset(&adjust, 0, sizeof(adjust));
-adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
-
-/* save the enablement of gamut remap for dpp */
-enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
-
-/* force bypass gamut remap for dpp/cm */
-pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
-dc->hwss.program_gamut_remap(pipe_ctx);
-
-/* restore gamut remap flag and use this remap into mpc */
-pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
-
-/* build remap matrix for top plane if enabled */
-if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) {
-adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
-for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
-adjust.temperature_matrix[i] =
-pipe_ctx->stream->gamut_remap_matrix.matrix[i];
-}
-mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
-} else
 /* dpp/cm gamut remap*/
 dc->hwss.program_gamut_remap(pipe_ctx);
 
@ -1595,6 +1595,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 dcn2_1_soc.num_chans = bw_params->num_channels;
 
 ASSERT(clk_table->num_entries);
+/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
+for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {
+clock_limits[i] = dcn2_1_soc.clock_limits[i];
+}
+
 for (i = 0; i < clk_table->num_entries; i++) {
 /* loop backwards*/
 for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format(
 struct pwl_result_data *rgb_resulted;
 struct pwl_result_data *rgb;
 struct pwl_result_data *rgb_plus_1;
+struct pwl_result_data *rgb_minus_1;
 struct fixed31_32 end_value;
 
 int32_t region_start, region_end;
@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format(
 region_start = -MAX_LOW_POINT;
 region_end = NUMBER_REGIONS - MAX_LOW_POINT;
 } else {
-/* 10 segments
+/* 11 segments
 * segment is from 2^-10 to 2^0
 * There are less than 256 points, for optimization
 */
@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format(
 seg_distr[7] = 4;
 seg_distr[8] = 4;
 seg_distr[9] = 4;
+seg_distr[10] = 1;
 
 region_start = -10;
-region_end = 0;
+region_end = 1;
 }
 
 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format(
 rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
 rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
 // All 3 color channels have same x
 corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
 dc_fixpt_from_int(region_start));
@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format(
 
 rgb = rgb_resulted;
 rgb_plus_1 = rgb_resulted + 1;
+rgb_minus_1 = rgb;
 
 i = 1;
 while (i != hw_points + 1) {
+if (i >= hw_points - 1) {
 if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-rgb_plus_1->red = rgb->red;
+rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
 if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-rgb_plus_1->green = rgb->green;
+rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
 if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-rgb_plus_1->blue = rgb->blue;
+rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+}
 
 rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
 rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format(
 }
 
 ++rgb_plus_1;
+rgb_minus_1 = rgb;
 ++rgb;
 ++i;
 }
@ -603,7 +603,6 @@ static int append_oa_sample(struct i915_perf_stream *stream,
 {
 int report_size = stream->oa_buffer.format_size;
 struct drm_i915_perf_record_header header;
-u32 sample_flags = stream->sample_flags;
 
 header.type = DRM_I915_PERF_RECORD_SAMPLE;
 header.pad = 0;
@ -617,10 +616,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
 return -EFAULT;
 buf += sizeof(header);
 
-if (sample_flags & SAMPLE_OA_REPORT) {
 if (copy_to_user(buf, report, report_size))
 return -EFAULT;
-}
 
 (*offset) += header.size;
 
@ -2682,7 +2679,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
 
 stream->perf->ops.oa_enable(stream);
 
-if (stream->periodic)
+if (stream->sample_flags & SAMPLE_OA_REPORT)
 hrtimer_start(&stream->poll_check_timer,
 ns_to_ktime(stream->poll_oa_period),
 HRTIMER_MODE_REL_PINNED);
@ -2745,7 +2742,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
 {
 stream->perf->ops.oa_disable(stream);
 
-if (stream->periodic)
+if (stream->sample_flags & SAMPLE_OA_REPORT)
 hrtimer_cancel(&stream->poll_check_timer);
 }
 
@ -3028,7 +3025,7 @@ static ssize_t i915_perf_read(struct file *file,
 * disabled stream as an error. In particular it might otherwise lead
 * to a deadlock for blocking file descriptors...
 */
-if (!stream->enabled)
+if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
 return -EIO;
 
 if (!(file->f_flags & O_NONBLOCK)) {
@ -3316,7 +3316,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
 #define ILK_FBCQ_DIS (1 << 22)
-#define ILK_PABSTRETCH_DIS (1 << 21)
+#define ILK_PABSTRETCH_DIS REG_BIT(21)
+#define ILK_SABSTRETCH_DIS REG_BIT(20)
+#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20)
+#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0)
+#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1)
+#define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2)
+#define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3)
+#define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18)
+#define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0)
+#define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1)
+#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
+#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
 
 
 /*
@ -8039,6 +8050,16 @@ enum {
 
 #define _CHICKEN_PIPESL_1_A 0x420b0
 #define _CHICKEN_PIPESL_1_B 0x420b4
+#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
+#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
+#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
+#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
+#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
+#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
+#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
+#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
+#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
+#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
 #define HSW_FBCQ_DIS (1 << 22)
 #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
@ -7245,11 +7245,16 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
 intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
-/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
 for_each_pipe(dev_priv, pipe) {
+/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
 BDW_DPRS_MASK_VBLANK_SRD);
+
+/* Undocumented but fixes async flip + VT-d corruption */
+if (intel_vtd_active())
+intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
 }
 
 /* WaVSRefCountFullforceMissDisable:bdw */
@ -7285,11 +7290,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+enum pipe pipe;
+
 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
 intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
 intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
 HSW_FBCQ_DIS);
 
+for_each_pipe(dev_priv, pipe) {
+/* Undocumented but fixes async flip + VT-d corruption */
+if (intel_vtd_active())
+intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
+}
+
 /* This is required by WaCatErrorRejectionIssue:hsw */
 intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@ -551,6 +551,10 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 
 if (!ttm_dma)
 return;
+if (!ttm_dma->pages) {
+NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+return;
+}
 
 /* Don't waste time looping if the object is coherent */
 if (nvbo->force_coherent)
@ -583,6 +587,10 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 
 if (!ttm_dma)
 return;
+if (!ttm_dma->pages) {
+NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+return;
+}
 
 /* Don't waste time looping if the object is coherent */
 if (nvbo->force_coherent)
@ -2149,11 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
 const struct mipi_dsi_msg *msg)
 {
 struct mipi_dsi_packet pkt;
+int ret;
 u32 r;
 
-r = mipi_dsi_create_packet(&pkt, msg);
-if (r < 0)
-return r;
+ret = mipi_dsi_create_packet(&pkt, msg);
+if (ret < 0)
+return ret;
 
 WARN_ON(!dsi_bus_is_locked(dsi));
 
@ -266,6 +266,8 @@ config ADI_AXI_ADC
 select IIO_BUFFER
 select IIO_BUFFER_HW_CONSUMER
 select IIO_BUFFER_DMAENGINE
+depends on HAS_IOMEM
+depends on OF
 help
 Say yes here to build support for Analog Devices Generic
 AXI ADC IP core. The IP core is used for interfacing with
@ -923,6 +925,7 @@ config STM32_ADC_CORE
 depends on ARCH_STM32 || COMPILE_TEST
 depends on OF
 depends on REGULATOR
+depends on HAS_IOMEM
 select IIO_BUFFER
 select MFD_STM32_TIMERS
 select IIO_STM32_TIMER_TRIGGER
@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
 return processed;
 
 /* Return millivolt or milliamps or millicentigrades */
-*val = processed * 1000;
+*val = processed;
 return IIO_VAL_INT;
 }
 
@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
 int ret;
 int i;
 int bits_per_word = ad7949_adc->resolution;
-int mask = GENMASK(ad7949_adc->resolution, 0);
+int mask = GENMASK(ad7949_adc->resolution - 1, 0);
 struct spi_message msg;
 struct spi_transfer tx[] = {
 {
@ -597,7 +597,7 @@ static const struct vadc_channels vadc_chans[] = {
 VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1)
 
 VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0)
-VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0)
+VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT)
 VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0)
 VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0)
 VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
 MPU3050_FIFO_R,
 &fifo_values[offset],
 toread);
+if (ret)
+goto out_trigger_unlock;
 
 dev_dbg(mpu3050->dev,
 "%04x %04x %04x %04x %04x\n",
@ -15,7 +15,10 @@
 struct hid_humidity_state {
 struct hid_sensor_common common_attributes;
 struct hid_sensor_hub_attribute_info humidity_attr;
+struct {
 s32 humidity_data;
+u64 timestamp __aligned(8);
+} scan;
 int scale_pre_decml;
 int scale_post_decml;
 int scale_precision;
@ -125,8 +128,7 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
 struct hid_humidity_state *humid_st = iio_priv(indio_dev);
 
 if (atomic_read(&humid_st->common_attributes.data_ready))
-iio_push_to_buffers_with_timestamp(indio_dev,
-&humid_st->humidity_data,
+iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan,
 iio_get_time_ns(indio_dev));
 
 return 0;
@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
 
 switch (usage_id) {
 case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
-humid_st->humidity_data = *(s32 *)raw_data;
+humid_st->scan.humidity_data = *(s32 *)raw_data;
 
 return 0;
 default:
@ -462,8 +462,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
 if (ret)
 goto err_ret;
 
-ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
-if (ret != 1) {
+if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
 ret = -EINVAL;
 goto err_ret;
 }
@ -23,6 +23,9 @@ struct prox_state {
 struct hid_sensor_common common_attributes;
 struct hid_sensor_hub_attribute_info prox_attr;
 u32 human_presence;
+int scale_pre_decml;
+int scale_post_decml;
+int scale_precision;
 };
 
 /* Channel definitions */
@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev,
 ret_type = IIO_VAL_INT;
 break;
 case IIO_CHAN_INFO_SCALE:
-*val = prox_state->prox_attr.units;
-ret_type = IIO_VAL_INT;
+*val = prox_state->scale_pre_decml;
+*val2 = prox_state->scale_post_decml;
+ret_type = prox_state->scale_precision;
 break;
 case IIO_CHAN_INFO_OFFSET:
 *val = hid_sensor_convert_exponent(
@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev,
 HID_USAGE_SENSOR_HUMAN_PRESENCE,
 &st->common_attributes.sensitivity);
 
+st->scale_precision = hid_sensor_format_scale(
+hsdev->usage,
+&st->prox_attr,
+&st->scale_pre_decml, &st->scale_post_decml);
+
 return ret;
 }
 
@ -15,7 +15,10 @@
 struct temperature_state {
 struct hid_sensor_common common_attributes;
 struct hid_sensor_hub_attribute_info temperature_attr;
+struct {
 s32 temperature_data;
+u64 timestamp __aligned(8);
+} scan;
 int scale_pre_decml;
 int scale_post_decml;
 int scale_precision;
@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = {
 BIT(IIO_CHAN_INFO_SAMP_FREQ) |
 BIT(IIO_CHAN_INFO_HYSTERESIS),
 },
-IIO_CHAN_SOFT_TIMESTAMP(3),
+IIO_CHAN_SOFT_TIMESTAMP(1),
 };
 
 /* Adjust channel real bits based on report descriptor */
@ -123,8 +126,7 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
 struct temperature_state *temp_st = iio_priv(indio_dev);
 
 if (atomic_read(&temp_st->common_attributes.data_ready))
-iio_push_to_buffers_with_timestamp(indio_dev,
-&temp_st->temperature_data,
+iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
 iio_get_time_ns(indio_dev));
 
 return 0;
@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
 
 switch (usage_id) {
 case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
-temp_st->temperature_data = *(s32 *)raw_data;
+temp_st->scan.temperature_data = *(s32 *)raw_data;
 return 0;
 default:
 return -EINVAL;
@ -26,7 +26,6 @@
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Joystick device interfaces");
-MODULE_SUPPORTED_DEVICE("input/js");
 MODULE_LICENSE("GPL");
 
 #define JOYDEV_MINOR_BASE 0
@ -2714,7 +2714,6 @@ static int __init early_amd_iommu_init(void)
 struct acpi_table_header *ivrs_base;
 int i, remap_cache_sz, ret;
 acpi_status status;
-u32 pci_id;
 
 if (!amd_iommu_detected)
 return -ENODEV;
@ -2804,16 +2803,6 @@ static int __init early_amd_iommu_init(void)
 if (ret)
 goto out;
 
-/* Disable IOMMU if there's Stoney Ridge graphics */
-for (i = 0; i < 32; i++) {
-pci_id = read_pci_config(0, i, 0, 0);
-if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
-pr_info("Disable IOMMU on Stoney Ridge\n");
-amd_iommu_disabled = true;
-break;
-}
-}
-
 /* Disable any previously enabled IOMMUs */
 if (!is_kdump_kernel() || amd_iommu_disabled)
 disable_iommus();
@ -2880,6 +2869,7 @@ static bool detect_ivrs(void)
 {
 struct acpi_table_header *ivrs_base;
 acpi_status status;
+int i;
 
 status = acpi_get_table("IVRS", 0, &ivrs_base);
 if (status == AE_NOT_FOUND)
@ -2892,6 +2882,17 @@ static bool detect_ivrs(void)
 
 acpi_put_table(ivrs_base);
 
+/* Don't use IOMMU if there is Stoney Ridge graphics */
+for (i = 0; i < 32; i++) {
+u32 pci_id;
+
+pci_id = read_pci_config(0, i, 0, 0);
+if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+pr_info("Disable IOMMU on Stoney Ridge\n");
+return false;
+}
+}
+
 /* Make sure ACS will be enabled during PCI probe */
 pci_request_acs();
 
@ -2918,12 +2919,12 @@ static int __init state_next(void)
 }
 break;
 case IOMMU_IVRS_DETECTED:
-ret = early_amd_iommu_init();
-init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
-if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
-pr_info("AMD IOMMU disabled\n");
+if (amd_iommu_disabled) {
 init_state = IOMMU_CMDLINE_DISABLED;
 ret = -EINVAL;
+} else {
+ret = early_amd_iommu_init();
+init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
 }
 break;
 case IOMMU_ACPI_FINISHED:
@ -3001,8 +3002,11 @@ int __init amd_iommu_prepare(void)
 amd_iommu_irq_remap = true;
 
 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
-if (ret)
+if (ret) {
+amd_iommu_irq_remap = false;
 return ret;
+}
 
 return amd_iommu_irq_remap ? 0 : -ENODEV;
 }
 
@ -849,12 +849,11 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
|
|||||||
smmu = tegra_smmu_find(args.np);
|
smmu = tegra_smmu_find(args.np);
|
||||||
if (smmu) {
|
if (smmu) {
|
||||||
err = tegra_smmu_configure(smmu, dev, &args);
|
err = tegra_smmu_configure(smmu, dev, &args);
|
||||||
|
|
||||||
|
if (err < 0) {
|
||||||
of_node_put(args.np);
|
of_node_put(args.np);
|
||||||
|
|
||||||
if (err < 0)
|
|
||||||
return ERR_PTR(err);
|
return ERR_PTR(err);
|
||||||
|
}
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
of_node_put(args.np);
|
of_node_put(args.np);
|
||||||
|
@ -430,4 +430,3 @@ MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>");
|
|||||||
MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
|
MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
|
||||||
MODULE_DESCRIPTION("FireDTV DVB Driver");
|
MODULE_DESCRIPTION("FireDTV DVB Driver");
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
MODULE_SUPPORTED_DEVICE("FireDTV DVB");
|
|
||||||
|
@ -41,7 +41,6 @@ MODULE_PARM_DESC(debug,
|
|||||||
|
|
||||||
MODULE_AUTHOR("Andy Walls");
|
MODULE_AUTHOR("Andy Walls");
|
||||||
MODULE_DESCRIPTION("CX23418 ALSA Interface");
|
MODULE_DESCRIPTION("CX23418 ALSA Interface");
|
||||||
MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
|
||||||
MODULE_VERSION(CX18_VERSION);
|
MODULE_VERSION(CX18_VERSION);
|
||||||
|
@@ -232,7 +232,6 @@ MODULE_PARM_DESC(cx18_first_minor,
 
 MODULE_AUTHOR("Hans Verkuil");
 MODULE_DESCRIPTION("CX23418 driver");
-MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(CX18_VERSION);
@@ -104,7 +104,6 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s).");
 MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards");
 MODULE_AUTHOR("Hiep Huynh");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */
 
 static unsigned int debug;
 module_param(debug, int, 0644);
@@ -98,7 +98,6 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(CX88_VERSION);
 
-MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}");
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
@@ -38,7 +38,6 @@ MODULE_PARM_DESC(index,
 
 MODULE_AUTHOR("Andy Walls");
 MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface");
-MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(IVTV_VERSION);
@@ -275,9 +275,6 @@ MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first car
 
 MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");
 MODULE_DESCRIPTION("CX23415/CX23416 driver");
-MODULE_SUPPORTED_DEVICE
-    ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n"
-     "\t\t\tYuan MPG series and similar)");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(IVTV_VERSION);
@@ -1269,6 +1269,5 @@ late_initcall_sync(sta2x11_vip_init_module);
 MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
 MODULE_AUTHOR("Wind River");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("sta2x11 video input");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
@@ -1363,4 +1363,3 @@ module_platform_driver(atmel_isi_driver);
 MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
 MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("video");
@@ -330,4 +330,3 @@ module_platform_driver(atmel_isc_driver);
 MODULE_AUTHOR("Songjun Wu");
 MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("video");
@@ -44,10 +44,6 @@
 MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
 MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Video");
-
-
-
 
 struct cafe_camera {
 	int registered;			/* Fully initialized? */
@@ -2149,4 +2149,3 @@ MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
 MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("video");
@@ -56,7 +56,6 @@ MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(5
 
 MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
 MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
-MODULE_SUPPORTED_DEVICE("video");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CPIA_VERSION);
 
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
 MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}");
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
@@ -23,8 +23,6 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL");
 
-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}");
-
 static int debug;
 
 module_param(debug, int, 0644);
@@ -32,7 +32,6 @@
 
 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
 MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
-MODULE_SUPPORTED_DEVICE(DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_VERSION("2.1");
 
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
 MODULE_LICENSE("GPL v2");
 
 #define PCIEFD_DRV_NAME "peak_pciefd"
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
-MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define EMS_PCI_V1_MAX_CHAN 2
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");
-MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define EMS_PCMCIA_MAX_CHAN 2
@@ -33,7 +33,6 @@
 
 MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>");
 MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards");
-MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */
@@ -24,8 +24,6 @@
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");
 MODULE_LICENSE("GPL v2");
 
 #define DRV_NAME "peak_pci"
@@ -22,7 +22,6 @@
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card");
 
 /* PEAK-System PCMCIA driver name */
 #define PCC_NAME "peak_pcmcia"
@@ -25,18 +25,6 @@
 MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
 MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
 		   "the SJA1000 chips");
-MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
-			"Adlink PCI-7841/cPCI-7841 SE, "
-			"Marathon CAN-bus-PCI, "
-			"Marathon CAN-bus-PCIe, "
-			"TEWS TECHNOLOGIES TPMC810, "
-			"esd CAN-PCI/CPCI/PCI104/200, "
-			"esd CAN-PCI/PMC/266, "
-			"esd CAN-PCIe/2000, "
-			"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
-			"IXXAT PC-I 04/PCI, "
-			"ELCUS CAN-200-PCI, "
-			"ASEM DUAL CAN-RAW")
 MODULE_LICENSE("GPL v2");
 
 #define PLX_PCI_MAX_CHAN 2
Some files were not shown because too many files have changed in this diff.