commit 837e8ac871

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -535,6 +535,7 @@ ForEachMacros:
   - 'perf_hpp_list__for_each_sort_list_safe'
   - 'perf_pmu__for_each_hybrid_pmu'
   - 'ping_portaddr_for_each_entry'
+  - 'ping_portaddr_for_each_entry_rcu'
   - 'plist_for_each'
   - 'plist_for_each_continue'
   - 'plist_for_each_entry'
@@ -148,7 +148,7 @@ allOf:
         items:
           - const: oscclk
           - const: dout_clkcmu_fsys1_bus
-          - const: dout_clkcmu_fsys1_mmc_card
+          - const: gout_clkcmu_fsys1_mmc_card
           - const: dout_clkcmu_fsys1_usbdrd

   - if:
Documentation/loongarch/booting.rst (new file, 42 lines)

@@ -0,0 +1,42 @@
.. SPDX-License-Identifier: GPL-2.0

=======================
Booting Linux/LoongArch
=======================

:Author: Yanteng Si <siyanteng@loongson.cn>
:Date:   18 Nov 2022

Information passed from BootLoader to kernel
============================================

LoongArch supports ACPI and FDT. The information that needs to be passed
to the kernel includes the memmap, the initrd, the command line and,
optionally, the ACPI/FDT tables, and so on.

The kernel is passed the following arguments on `kernel_entry`:

- a0 = efi_boot: `efi_boot` is a flag indicating whether
  this boot environment is fully UEFI-compliant.

- a1 = cmdline: `cmdline` is a pointer to the kernel command line.

- a2 = systemtable: `systemtable` points to the EFI system table.
  All pointers involved at this stage are in physical addresses.

Header of Linux/LoongArch kernel images
=======================================

Linux/LoongArch kernel images are EFI images. Being PE files, they have
a 64-byte header structured like::

	u32	MZ_MAGIC		/* "MZ", MS-DOS header */
	u32	res0 = 0		/* Reserved */
	u64	kernel_entry		/* Kernel entry point */
	u64	_end - _text		/* Kernel image effective size */
	u64	load_offset		/* Kernel image load offset from start of RAM */
	u64	res1 = 0		/* Reserved */
	u64	res2 = 0		/* Reserved */
	u64	res3 = 0		/* Reserved */
	u32	LINUX_PE_MAGIC		/* Magic number */
	u32	pe_header - _head	/* Offset to the PE header */
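For readers implementing a boot loader, the header above maps naturally onto a
packed C struct. The sketch below is illustrative only: the field names are
ours, not the kernel's, and the layout is simply transcribed from the 64-byte
description above::

	#include <stdint.h>

	struct loongarch_efi_image_header {	/* illustrative, not a kernel type */
		uint32_t mz_magic;		/* "MZ", MS-DOS header */
		uint32_t res0;			/* Reserved, 0 */
		uint64_t kernel_entry;		/* Kernel entry point */
		uint64_t image_size;		/* _end - _text, effective size */
		uint64_t load_offset;		/* Load offset from start of RAM */
		uint64_t res1;			/* Reserved */
		uint64_t res2;			/* Reserved */
		uint64_t res3;			/* Reserved */
		uint32_t linux_pe_magic;	/* Magic number */
		uint32_t pe_header_off;		/* Offset to the PE header */
	};

	/* 4 + 4 + 6 * 8 + 4 + 4 = 64 bytes, matching the documented header. */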
@@ -9,6 +9,7 @@ LoongArch Architecture
   :numbered:

   introduction
+  booting
   irq-chip-model

   features
Documentation/translations/zh_CN/loongarch/booting.rst (new file, 48 lines)

@@ -0,0 +1,48 @@
.. SPDX-License-Identifier: GPL-2.0

.. include:: ../disclaimer-zh_CN.rst

:Original: Documentation/loongarch/booting.rst

:Translator:

 Yanteng Si (司延腾) <siyanteng@loongson.cn>

=======================
Booting Linux/LoongArch
=======================

:Author: Yanteng Si <siyanteng@loongson.cn>
:Date:   18 Nov 2022

Information passed from the BootLoader to the kernel
====================================================

LoongArch supports booting with ACPI and FDT. The information that needs to
be passed to the kernel includes the memmap, the initrd, the cmdline and,
optionally, the ACPI/FDT tables.

The kernel is passed the following arguments at the `kernel_entry` entry point:

- a0 = efi_boot: `efi_boot` is a flag indicating whether this boot
  environment is fully UEFI-compliant.

- a1 = cmdline: `cmdline` is a pointer to the kernel command line.

- a2 = systemtable: `systemtable` points to the EFI system table; all
  pointers involved at this stage are physical addresses.

Header of Linux/LoongArch kernel images
=======================================

Kernel images are EFI images. Being PE files, they have a 64-byte header
structured as follows::

	u32	MZ_MAGIC		/* "MZ", MS-DOS header */
	u32	res0 = 0		/* Reserved */
	u64	kernel_entry		/* Kernel entry point */
	u64	_end - _text		/* Kernel image effective size */
	u64	load_offset		/* Kernel image load offset from start of RAM */
	u64	res1 = 0		/* Reserved */
	u64	res2 = 0		/* Reserved */
	u64	res3 = 0		/* Reserved */
	u32	LINUX_PE_MAGIC		/* Magic number */
	u32	pe_header - _head	/* Offset to the PE header */
@@ -14,6 +14,7 @@ LoongArch体系结构
   :numbered:

   introduction
+  booting
   irq-chip-model

   features
@@ -7213,14 +7213,13 @@ veto the transition.
 :Parameters: args[0] is the maximum poll time in nanoseconds
 :Returns: 0 on success; -1 on error

-This capability overrides the kvm module parameter halt_poll_ns for the
-target VM.
+KVM_CAP_HALT_POLL overrides the kvm.halt_poll_ns module parameter to set the
+maximum halt-polling time for all vCPUs in the target VM. This capability can
+be invoked at any time and any number of times to dynamically change the
+maximum halt-polling time.

-VCPU polling allows a VCPU to poll for wakeup events instead of immediately
-scheduling during guest halts. The maximum time a VCPU can spend polling is
-controlled by the kvm module parameter halt_poll_ns. This capability allows
-the maximum halt time to specified on a per-VM basis, effectively overriding
-the module parameter for the target VM.
+See Documentation/virt/kvm/halt-polling.rst for more information on halt
+polling.

 7.21 KVM_CAP_X86_USER_SPACE_MSR
 -------------------------------
@@ -119,6 +119,19 @@ These module parameters can be set from the debugfs files in:
 Note: that these module parameters are system wide values and are not able to
 be tuned on a per vm basis.

+Any changes to these parameters will be picked up by new and existing vCPUs the
+next time they halt, with the notable exception of VMs using KVM_CAP_HALT_POLL
+(see next section).
+
+KVM_CAP_HALT_POLL
+=================
+
+KVM_CAP_HALT_POLL is a VM capability that allows userspace to override halt_poll_ns
+on a per-VM basis. VMs using KVM_CAP_HALT_POLL ignore halt_poll_ns completely (but
+still obey halt_poll_ns_grow, halt_poll_ns_grow_start, and halt_poll_ns_shrink).
+
+See Documentation/virt/kvm/api.rst for more information on this capability.
+
 Further Notes
 =============
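To make the userspace side of this capability concrete: KVM_CAP_HALT_POLL is a
VM capability, so it is enabled with the KVM_ENABLE_CAP ioctl on the VM file
descriptor, with the maximum poll time in args[0]. A minimal sketch, assuming a
VM fd already obtained from KVM_CREATE_VM (error handling omitted)::

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int set_max_halt_poll_ns(int vm_fd, unsigned long long max_ns)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_HALT_POLL;
		cap.args[0] = max_ns;	/* e.g. 200000 for 200us; 0 disables polling */

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
	}

As the hunk above notes, a VM configured this way ignores halt_poll_ns entirely
but still obeys the grow/shrink module parameters.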
@@ -17,4 +17,5 @@ KVM
   locking
   vcpu-requests
+  halt-polling
   review-checklist
@@ -10,7 +10,6 @@ KVM for x86 systems
   amd-memory-encryption
   cpuid
   errata
-  halt-polling
   hypercalls
   mmu
   msr
Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -666,7 +666,7 @@
 	compatible = "atmel,at91rm9200-udc";
 	reg = <0xfffb0000 0x4000>;
 	interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>;
-	clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>;
+	clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>;
	clock-names = "pclk", "hclk";
 	status = "disabled";
 };
@@ -26,7 +26,7 @@ static void sama5_l2c310_write_sec(unsigned long val, unsigned reg)
 static void __init sama5_secure_cache_init(void)
 {
 	sam_secure_init();
-	if (sam_linux_is_optee_available())
+	if (IS_ENABLED(CONFIG_OUTER_CACHE) && sam_linux_is_optee_available())
 		outer_cache.write_sec = sama5_l2c310_write_sec;
 }
@@ -14,16 +14,8 @@
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
 #else
 #define efi_init()
-
-static inline
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
-{
-	return false;
-}
 #endif

 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
@@ -6,7 +6,7 @@
 #include <linux/linkage.h>

 SYM_FUNC_START(__efi_rt_asm_wrapper)
-	stp	x29, x30, [sp, #-112]!
+	stp	x29, x30, [sp, #-32]!
 	mov	x29, sp

 	/*

@@ -16,20 +16,6 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
 	 */
 	stp	x1, x18, [sp, #16]

-	/*
-	 * Preserve all callee saved registers and record the stack pointer
-	 * value in a per-CPU variable so we can recover from synchronous
-	 * exceptions occurring while running the firmware routines.
-	 */
-	stp	x19, x20, [sp, #32]
-	stp	x21, x22, [sp, #48]
-	stp	x23, x24, [sp, #64]
-	stp	x25, x26, [sp, #80]
-	stp	x27, x28, [sp, #96]
-
-	adr_this_cpu	x8, __efi_rt_asm_recover_sp, x9
-	str	x29, [x8]
-
 	/*
 	 * We are lucky enough that no EFI runtime services take more than
 	 * 5 arguments, so all are passed in registers rather than via the

@@ -45,7 +31,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
 	ldp	x1, x2, [sp, #16]
 	cmp	x2, x18
-	ldp	x29, x30, [sp], #112
+	ldp	x29, x30, [sp], #32
 	b.ne	0f
 	ret
 0:

@@ -59,18 +45,3 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
 	mov	x18, x2
 	b	efi_handle_corrupted_x18	// tail call
 SYM_FUNC_END(__efi_rt_asm_wrapper)
-
-SYM_FUNC_START(__efi_rt_asm_recover)
-	ldr_this_cpu	x8, __efi_rt_asm_recover_sp, x9
-	mov	sp, x8
-
-	ldp	x0, x18, [sp, #16]
-	ldp	x19, x20, [sp, #32]
-	ldp	x21, x22, [sp, #48]
-	ldp	x23, x24, [sp, #64]
-	ldp	x25, x26, [sp, #80]
-	ldp	x27, x28, [sp, #96]
-	ldp	x29, x30, [sp], #112
-
-	b	efi_handle_runtime_exception
-SYM_FUNC_END(__efi_rt_asm_recover)
@@ -9,7 +9,6 @@
 #include <linux/efi.h>
 #include <linux/init.h>
-#include <linux/percpu.h>

 #include <asm/efi.h>

@@ -145,28 +144,3 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
 	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
 	return s;
 }
-
-asmlinkage DEFINE_PER_CPU(u64, __efi_rt_asm_recover_sp);
-
-asmlinkage efi_status_t __efi_rt_asm_recover(void);
-
-asmlinkage efi_status_t efi_handle_runtime_exception(const char *f)
-{
-	pr_err(FW_BUG "Synchronous exception occurred in EFI runtime service %s()\n", f);
-	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-	return EFI_ABORTED;
-}
-
-bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
-{
-	/* Check whether the exception occurred while running the firmware */
-	if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
-		return false;
-
-	pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
-	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-	dump_stack();
-
-	regs->pc = (u64)__efi_rt_asm_recover;
-	return true;
-}
@@ -36,7 +36,22 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
 	unsigned long start = (unsigned long)page_address(page);

-	dcache_clean_poc(start, start + size);
+	/*
+	 * The architecture only requires a clean to the PoC here in order to
+	 * meet the requirements of the DMA API. However, some vendors (i.e.
+	 * Qualcomm) abuse the DMA API for transferring buffers from the
+	 * non-secure to the secure world, resetting the system if a non-secure
+	 * access shows up after the buffer has been transferred:
+	 *
+	 * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
+	 *
+	 * Using clean+invalidate appears to make this issue less likely, but
+	 * the drivers themselves still need fixing as the CPU could issue a
+	 * speculative read from the buffer via the linear mapping irrespective
+	 * of the cache maintenance we use. Once the drivers are fixed, we can
+	 * relax this to a clean operation.
+	 */
+	dcache_clean_inval_poc(start, start + size);
 }

 #ifdef CONFIG_IOMMU_DMA
@@ -30,7 +30,6 @@
 #include <asm/bug.h>
 #include <asm/cmpxchg.h>
 #include <asm/cpufeature.h>
-#include <asm/efi.h>
 #include <asm/exception.h>
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>

@@ -392,9 +391,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 		msg = "paging request";
 	}

-	if (efi_runtime_fixup_exception(regs, msg))
-		return;
-
 	die_kernel_fault(msg, addr, esr, regs);
 }
|
@ -490,6 +490,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
|
||||
return pmd;
|
||||
}
|
||||
|
||||
#define pmd_young pmd_young
|
||||
static inline int pmd_young(pmd_t pmd)
|
||||
{
|
||||
return !!(pmd_val(pmd) & _PAGE_ACCESSED);
|
||||
|
@@ -78,16 +78,6 @@ extern void calculate_cpu_foreign_map(void);
  */
 extern void show_ipi_list(struct seq_file *p, int prec);

-/*
- * This function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static inline void smp_send_reschedule(int cpu)
-{
-	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
-}
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
@@ -149,6 +149,17 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 		ipi_write_action(cpu_logical_map(i), (u32)action);
 }

+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void smp_send_reschedule(int cpu)
+{
+	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
+
 irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {
 	unsigned int action;
@@ -10,6 +10,8 @@
 #include <asm/regdef.h>
 #include <asm/stackframe.h>

+#define INVTLB_ADDR_GFALSE_AND_ASID	5
+
 #define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
 #define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
 #define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)

@@ -136,13 +138,10 @@ tlb_huge_update_load:
 	ori	t0, ra, _PAGE_VALID
 	st.d	t0, t1, 0
 #endif
-	tlbsrch
-	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d	ra, t1, 0
-	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
-	tlbwr
-
-	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
+	csrrd	ra, LOONGARCH_CSR_ASID
+	csrrd	t1, LOONGARCH_CSR_BADV
+	andi	ra, ra, CSR_ASID_ASID
+	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

 	/*
 	 * A huge PTE describes an area the size of the

@@ -287,13 +286,11 @@ tlb_huge_update_store:
 	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 	st.d	t0, t1, 0
 #endif
-	tlbsrch
-	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d	ra, t1, 0
-	csrxchg	ra, t1, LOONGARCH_CSR_TLBIDX
-	tlbwr
+	csrrd	ra, LOONGARCH_CSR_ASID
+	csrrd	t1, LOONGARCH_CSR_BADV
+	andi	ra, ra, CSR_ASID_ASID
+	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

-	csrxchg	zero, t1, LOONGARCH_CSR_TLBIDX
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the

@@ -436,6 +433,11 @@ tlb_huge_update_modify:
 	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 	st.d	t0, t1, 0
 #endif
+	csrrd	ra, LOONGARCH_CSR_ASID
+	csrrd	t1, LOONGARCH_CSR_BADV
+	andi	ra, ra, CSR_ASID_ASID
+	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the

@@ -466,7 +468,7 @@ tlb_huge_update_modify:
 	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

-	tlbwr
+	tlbfill

 	/* Reset default page size */
 	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
@@ -622,6 +622,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
 	return pmd;
 }

+#define pmd_young pmd_young
 static inline int pmd_young(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
@@ -602,6 +602,7 @@ ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_RAW(machine_check_early_boot);
 DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
 DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
@@ -113,23 +113,19 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 {
 	int i;

-	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
-	EMIT(PPC_RAW_LI(_R4, 0));
-
-#define BPF_TAILCALL_PROLOGUE_SIZE	4
-
-	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
-
-	if (ctx->seen & SEEN_TAILCALL)
-		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
-
 	/* First arg comes in as a 32 bits pointer. */
 	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
 	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
+	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+
+	/*
+	 * Initialize tail_call_cnt in stack frame if we do tail calls.
+	 * Otherwise, put in NOPs so that it can be skipped when we are
+	 * invoked through a tail call.
+	 */
+	if (ctx->seen & SEEN_TAILCALL)
+		EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_1) - 1, _R1,
+				 bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+	else
+		EMIT(PPC_RAW_NOP());
+
+#define BPF_TAILCALL_PROLOGUE_SIZE	16

 	/*
 	 * We need a stack frame, but we don't necessarily need to

@@ -170,6 +166,16 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
 		if (bpf_is_seen_register(ctx, i))
 			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+
+	if (ctx->seen & SEEN_FUNC)
+		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+	/* Tear down our stack frame */
+	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+
+	if (ctx->seen & SEEN_FUNC)
+		EMIT(PPC_RAW_MTLR(_R0));
+
 }

 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)

@@ -178,16 +184,6 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_emit_common_epilogue(image, ctx);

-	/* Tear down our stack frame */
-
-	if (ctx->seen & SEEN_FUNC)
-		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
-
-	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
-
-	if (ctx->seen & SEEN_FUNC)
-		EMIT(PPC_RAW_MTLR(_R0));
-
 	EMIT(PPC_RAW_BLR());
 }

@@ -244,7 +240,6 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
 	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
 	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
-	EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

 	/*
	 * if (prog == NULL)

@@ -255,19 +250,14 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	/* goto *(prog->bpf_func + prologue_size); */
 	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
-
-	if (ctx->seen & SEEN_FUNC)
-		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
-
 	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
-
-	if (ctx->seen & SEEN_FUNC)
-		EMIT(PPC_RAW_MTLR(_R0));
-
 	EMIT(PPC_RAW_MTCTR(_R3));

 	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));

-	/* Put tail_call_cnt in r4 */
-	EMIT(PPC_RAW_MR(_R4, _R0));
-
 	/* tear restore NVRs, ... */
 	bpf_jit_emit_common_epilogue(image, ctx);
|
@ -317,9 +317,9 @@ config SMP
|
||||
config NR_CPUS
|
||||
int "Maximum number of CPUs (2-512)"
|
||||
depends on SMP
|
||||
range 2 512 if !SBI_V01
|
||||
range 2 32 if SBI_V01 && 32BIT
|
||||
range 2 64 if SBI_V01 && 64BIT
|
||||
range 2 512 if !RISCV_SBI_V01
|
||||
range 2 32 if RISCV_SBI_V01 && 32BIT
|
||||
range 2 64 if RISCV_SBI_V01 && 64BIT
|
||||
default "32" if 32BIT
|
||||
default "64" if 64BIT
|
||||
|
||||
|
@@ -23,6 +23,7 @@
 #define REG_L		__REG_SEL(ld, lw)
 #define REG_S		__REG_SEL(sd, sw)
 #define REG_SC		__REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ	__REG_SEL(amoswap.d.aq, amoswap.w.aq)
 #define REG_ASM		__REG_SEL(.dword, .word)
 #define SZREG		__REG_SEL(8, 4)
 #define LGREG		__REG_SEL(3, 2)
@@ -10,6 +10,7 @@
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>

 #ifdef CONFIG_EFI
 extern void efi_init(void);

@@ -20,7 +21,10 @@ extern void efi_init(void);
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

-#define arch_efi_call_virt_setup()	efi_virtmap_load()
+#define arch_efi_call_virt_setup()	({		\
+		sync_kernel_mappings(efi_mm.pgd);	\
+		efi_virtmap_load();			\
+	})
 #define arch_efi_call_virt_teardown()	efi_virtmap_unload()

 #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
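A plausible reading of this change, inferred from the hunks rather than stated
in them: on riscv, top-level kernel page-table entries can be populated lazily
after efi_mm's page table was created, so an EFI runtime call made through a
stale efi_mm could fault on a kernel mapping. Copying the kernel half of
init_mm's PGD into efi_mm.pgd (sync_kernel_mappings(), added in the next hunk)
right before efi_virtmap_load() keeps the EFI page table's kernel mappings
current.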
@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 #define __p4d_free_tlb(tlb, p4d, addr)  p4d_free((tlb)->mm, p4d)
 #endif /* __PAGETABLE_PMD_FOLDED */

+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       init_mm.pgd + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;

@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 		/* Copy kernel mappings */
-		memcpy(pgd + USER_PTRS_PER_PGD,
-		       init_mm.pgd + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		sync_kernel_mappings(pgd);
 	}
 	return pgd;
 }
@@ -600,6 +600,7 @@ static inline int pmd_dirty(pmd_t pmd)
 	return pte_dirty(pmd_pte(pmd));
 }

+#define pmd_young pmd_young
 static inline int pmd_young(pmd_t pmd)
 {
 	return pte_young(pmd_pte(pmd));
@@ -50,6 +50,9 @@ void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
 /* Clear IPI for current CPU */
 void riscv_clear_ipi(void);

+/* Check other CPUs stop or not */
+bool smp_crash_stop_failed(void);
+
 /* Secondary hart entry */
 asmlinkage void smp_callin(void);
@@ -404,6 +404,19 @@ handle_syscall_trace_exit:

 #ifdef CONFIG_VMAP_STACK
 handle_kernel_stack_overflow:
+	/*
+	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
+	 * harts are concurrently overflowing their kernel stacks. We could
+	 * store any value here, but since we're overflowing the kernel stack
+	 * already we only have SP to use as a scratch register. So we just
+	 * swap in the address of the spinlock, as that's definitely non-zero.
+	 *
+	 * Pairs with a store_release in handle_bad_stack().
+	 */
+1:	la sp, spin_shadow_stack
+	REG_AMOSWAP_AQ sp, sp, (sp)
+	bnez sp, 1b
+
 	la sp, shadow_stack
 	addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
@@ -15,6 +15,8 @@
 #include <linux/compiler.h>	/* For unreachable() */
 #include <linux/cpu.h>		/* For cpu_down() */
 #include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>

 /*
  * kexec_image_info - Print received image details

@@ -138,20 +140,35 @@ void machine_shutdown(void)
 #endif
 }

-/* Override the weak function in kernel/panic.c */
-void crash_smp_send_stop(void)
+static void machine_kexec_mask_interrupts(void)
 {
-	static int cpus_stopped;
+	unsigned int i;
+	struct irq_desc *desc;

-	/*
-	 * This function can be called twice in panic path, but obviously
-	 * we execute this only once.
-	 */
-	if (cpus_stopped)
-		return;
+	for_each_irq_desc(i, desc) {
+		struct irq_chip *chip;
+		int ret;

-	smp_send_stop();
-	cpus_stopped = 1;
+		chip = irq_desc_get_chip(desc);
+		if (!chip)
+			continue;
+
+		/*
+		 * First try to remove the active state. If this
+		 * fails, try to EOI the interrupt.
+		 */
+		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+		    chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
+
+		if (chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
+
+		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+			chip->irq_disable(&desc->irq_data);
+	}
 }

 /*

@@ -169,6 +186,8 @@ machine_crash_shutdown(struct pt_regs *regs)
 	crash_smp_send_stop();

 	crash_save_cpu(regs, smp_processor_id());
+	machine_kexec_mask_interrupts();

 	pr_info("Starting crashdump kernel...\n");
 }

@@ -195,6 +214,11 @@ machine_kexec(struct kimage *image)
 	void *control_code_buffer = page_address(image->control_code_page);
 	riscv_kexec_method kexec_method = NULL;

+#ifdef CONFIG_SMP
+	WARN(smp_crash_stop_failed(),
+	     "Some CPUs may be stale, kdump will be unreliable.\n");
+#endif
+
 	if (image->type != KEXEC_TYPE_CRASH)
 		kexec_method = control_code_buffer;
 	else
@@ -322,10 +322,11 @@ subsys_initcall(topology_init);

 void free_initmem(void)
 {
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
-				  IS_ENABLED(CONFIG_64BIT) ?
-					set_memory_rw : set_memory_rw_nx);
+	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
+		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
+		if (IS_ENABLED(CONFIG_64BIT))
+			set_kernel_memory(__init_begin, __init_end, set_memory_nx);
+	}

 	free_initmem_default(POISON_FREE_INITMEM);
 }
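The apparent intent of this rewrite, inferred from the diff: the init region
should end up readable, writable and non-executable everywhere it is mapped.
The linear-map alias is therefore always set RW + NX, and on 64-bit, where the
kernel image lives at a separate virtual address from the linear mapping, the
image-side init region is additionally marked non-executable.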
@@ -12,6 +12,7 @@
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kexec.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>

@@ -22,11 +23,13 @@
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>

 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CPU_STOP,
+	IPI_CPU_CRASH_STOP,
 	IPI_IRQ_WORK,
 	IPI_TIMER,
 	IPI_MAX

@@ -71,6 +74,32 @@ static void ipi_stop(void)
 	wait_for_interrupt();
 }

+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+	crash_save_cpu(regs, cpu);
+
+	atomic_dec(&waiting_for_crash_ipi);
+
+	local_irq_disable();
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu_has_hotplug(cpu))
+		cpu_ops[cpu]->cpu_stop();
+#endif
+
+	for(;;)
+		wait_for_interrupt();
+}
+#else
+static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+	unreachable();
+}
+#endif
+
 static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

 void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)

@@ -124,8 +153,9 @@ void arch_irq_work_raise(void)

 void handle_IPI(struct pt_regs *regs)
 {
-	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
-	unsigned long *stats = ipi_data[smp_processor_id()].stats;
+	unsigned int cpu = smp_processor_id();
+	unsigned long *pending_ipis = &ipi_data[cpu].bits;
+	unsigned long *stats = ipi_data[cpu].stats;

 	riscv_clear_ipi();

@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs)
 		ipi_stop();
 	}

+	if (ops & (1 << IPI_CPU_CRASH_STOP)) {
+		ipi_cpu_crash_stop(cpu, get_irq_regs());
+	}
+
 	if (ops & (1 << IPI_IRQ_WORK)) {
 		stats[IPI_IRQ_WORK]++;
 		irq_work_run();

@@ -176,6 +210,7 @@ static const char * const ipi_names[] = {
 	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
 	[IPI_CALL_FUNC]		= "Function call interrupts",
 	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
 	[IPI_IRQ_WORK]		= "IRQ work interrupts",
 	[IPI_TIMER]		= "Timer broadcast interrupts",
 };

@@ -235,6 +270,64 @@ void smp_send_stop(void)
 		   cpumask_pr_args(cpu_online_mask));
 }

+#ifdef CONFIG_KEXEC_CORE
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+	unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+	return num_online_cpus() - this_cpu_online;
+}
+
+void crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+	cpumask_t mask;
+	unsigned long timeout;
+
+	/*
+	 * This function can be called twice in panic path, but obviously
+	 * we execute this only once.
+	 */
+	if (cpus_stopped)
+		return;
+
+	cpus_stopped = 1;
+
+	/*
+	 * If this cpu is the only one alive at this point in time, online or
+	 * not, there are no stop messages to be sent around, so just back out.
+	 */
+	if (num_other_online_cpus() == 0)
+		return;
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+
+	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
+
+	pr_crit("SMP: stopping secondary CPUs\n");
+	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+		udelay(1);
+
+	if (atomic_read(&waiting_for_crash_ipi) > 0)
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(&mask));
+}
+
+bool smp_crash_stop_failed(void)
+{
+	return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
 void smp_send_reschedule(int cpu)
 {
 	send_ipi_single(cpu, IPI_RESCHEDULE);
@@ -221,11 +221,29 @@ asmlinkage unsigned long get_overflow_stack(void)
 		OVERFLOW_STACK_SIZE;
 }

+/*
+ * A pseudo spinlock to protect the shadow stack from being used by multiple
+ * harts concurrently. This isn't a real spinlock because the lock side must
+ * be taken without a valid stack and with only a single register; it's only
+ * taken while in the process of panicking anyway, so the performance and
+ * error checking a proper spinlock gives us don't matter.
+ */
+unsigned long spin_shadow_stack;
+
 asmlinkage void handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long tsk_stk = (unsigned long)current->stack;
 	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

+	/*
+	 * We're done with the shadow stack by this point, as we're on the
+	 * overflow stack. Tell any other concurrent overflowing harts that
+	 * they can proceed with panicking by releasing the pseudo-spinlock.
+	 *
+	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+	 */
+	smp_store_release(&spin_shadow_stack, 0);
+
 	console_verbose();

 	pr_emerg("Insufficient stack space to handle exception!\n");
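Read together with the entry.S hunk earlier in this merge, the pair behaves
like a minimal test-and-set lock. A hedged C analogue, illustrative only (the
real lock side runs in assembly with SP as the sole scratch register):

	#include <stdatomic.h>

	extern atomic_ulong spin_shadow_stack;

	static void shadow_stack_lock(void)	/* entry.S: amoswap.aq loop */
	{
		/* Spin until the swapped-out value reads as zero; the stored
		 * value only needs to be non-zero, so any token works. */
		while (atomic_exchange_explicit(&spin_shadow_stack, 1,
						memory_order_acquire))
			;
	}

	static void shadow_stack_unlock(void)	/* traps.c: smp_store_release() */
	{
		atomic_store_explicit(&spin_shadow_stack, 0, memory_order_release);
	}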
@@ -17,6 +17,7 @@ vdso-syms += flush_icache
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o

 ccflags-y := -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING

 ifneq ($(c-gettimeofday-y),)
 CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
@@ -763,6 +763,7 @@ static inline int pmd_dirty(pmd_t pmd)
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
 }

+#define pmd_young pmd_young
 static inline int pmd_young(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
@@ -546,8 +546,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 		scb_s->eca |= scb_o->eca & ECA_CEI;
 	/* Epoch Extension */
-	if (test_kvm_facility(vcpu->kvm, 139))
+	if (test_kvm_facility(vcpu->kvm, 139)) {
 		scb_s->ecd |= scb_o->ecd & ECD_MEF;
+		scb_s->epdx = scb_o->epdx;
+	}

 	/* etoken */
 	if (test_kvm_facility(vcpu->kvm, 156))
@@ -693,6 +693,7 @@ static inline unsigned long pmd_dirty(pmd_t pmd)
 	return pte_dirty(pte);
 }

+#define pmd_young pmd_young
 static inline unsigned long pmd_young(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
@@ -321,7 +321,7 @@ static inline void indirect_branch_prediction_barrier(void)
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
-extern void write_spec_ctrl_current(u64 val, bool force);
+extern void update_spec_ctrl_cond(u64 val);
 extern u64 spec_ctrl_current(void);

 /*
@@ -139,6 +139,7 @@ static inline int pmd_dirty(pmd_t pmd)
 	return pmd_flags(pmd) & _PAGE_DIRTY;
 }

+#define pmd_young pmd_young
 static inline int pmd_young(pmd_t pmd)
 {
 	return pmd_flags(pmd) & _PAGE_ACCESSED;

@@ -1438,6 +1439,14 @@ static inline bool arch_has_hw_pte_young(void)
 	return true;
 }

+#ifdef CONFIG_XEN_PV
+#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+	return !cpu_feature_enabled(X86_FEATURE_XENPV);
+}
+#endif
+
 #ifdef CONFIG_PAGE_TABLE_CHECK
 static inline bool pte_user_accessible_page(pte_t pte)
 {
@@ -60,11 +60,18 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

 static DEFINE_MUTEX(spec_ctrl_mutex);

+/* Update SPEC_CTRL MSR and its cached copy unconditionally */
+static void update_spec_ctrl(u64 val)
+{
+	this_cpu_write(x86_spec_ctrl_current, val);
+	wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
 /*
  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-void write_spec_ctrl_current(u64 val, bool force)
+void update_spec_ctrl_cond(u64 val)
 {
 	if (this_cpu_read(x86_spec_ctrl_current) == val)
 		return;

@@ -75,7 +82,7 @@ void update_spec_ctrl_cond(u64 val)
 	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
 	 * forced the update can be delayed until that time.
 	 */
-	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
 		wrmsrl(MSR_IA32_SPEC_CTRL, val);
 }

@@ -1328,7 +1335,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
 	if (ia32_cap & ARCH_CAP_RRSBA) {
 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }

@@ -1450,7 +1457,7 @@ static void __init spectre_v2_select_mitigation(void)
 	if (spectre_v2_in_ibrs_mode(mode)) {
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}

 	switch (mode) {

@@ -1564,7 +1571,7 @@ static void __init spectre_v2_select_mitigation(void)
 static void update_stibp_msr(void * __unused)
 {
 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
-	write_spec_ctrl_current(val, true);
+	update_spec_ctrl(val);
 }

 /* Update x86_spec_ctrl_base in case SMT state changed. */

@@ -1797,7 +1804,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		x86_amd_ssb_disable();
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);
 	}
 }

@@ -2048,7 +2055,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+		update_spec_ctrl(x86_spec_ctrl_base);

 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
@@ -600,7 +600,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	}

 	if (updmsr)
-		write_spec_ctrl_current(msr, false);
+		update_spec_ctrl_cond(msr);
 }

 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
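Taken together, the x86 speculation hunks above replace the boolean "force"
parameter with two distinct helpers: update_spec_ctrl() writes the SPEC_CTRL
MSR and its per-CPU cache unconditionally, while update_spec_ctrl_cond() may
skip the MSR write when KERNEL_IBRS is enabled, since the MSR is rewritten on
return-to-user anyway.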
@@ -10574,8 +10574,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			vcpu->mmio_needed = 0;
 			r = 0;
+			goto out;
 		}
-		goto out;
 	}
 	if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
 		/* Page is swapped out. Do synthetic halt */
@@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a,
 {
 	struct memory_initiator *ia;
 	struct memory_initiator *ib;
-	unsigned long *p_nodes = priv;

 	ia = list_entry(a, struct memory_initiator, node);
 	ib = list_entry(b, struct memory_initiator, node);

-	set_bit(ia->processor_pxm, p_nodes);
-	set_bit(ib->processor_pxm, p_nodes);
-
 	return ia->processor_pxm - ib->processor_pxm;
 }

+static int initiators_to_nodemask(unsigned long *p_nodes)
+{
+	struct memory_initiator *initiator;
+
+	if (list_empty(&initiators))
+		return -ENXIO;
+
+	list_for_each_entry(initiator, &initiators, node)
+		set_bit(initiator->processor_pxm, p_nodes);
+
+	return 0;
+}
+
 static void hmat_register_target_initiators(struct memory_target *target)
 {
 	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);

@@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	 * initiators.
 	 */
 	bitmap_zero(p_nodes, MAX_NUMNODES);
-	list_sort(p_nodes, &initiators, initiator_cmp);
+	list_sort(NULL, &initiators, initiator_cmp);
+	if (initiators_to_nodemask(p_nodes) < 0)
+		return;
+
 	if (!access0done) {
 		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
 			loc = localities_types[i];

@@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	/* Access 1 ignores Generic Initiators */
 	bitmap_zero(p_nodes, MAX_NUMNODES);
-	list_sort(p_nodes, &initiators, initiator_cmp);
 	best = 0;
+	if (initiators_to_nodemask(p_nodes) < 0)
+		return;

 	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
 		loc = localities_types[i];
 		if (!loc)
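A likely motivation for this restructuring, inferred from the shape of the
change: list_sort() is free to never invoke the comparator for lists with
fewer than two entries, so building the p_nodes mask inside initiator_cmp()
could leave the mask empty on single-initiator systems. Collecting the bits in
a dedicated initiators_to_nodemask() walk decouples target registration from
how the sort happens to be implemented.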
@@ -109,7 +109,7 @@ struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv, const char *con_id)
 	int i;

 	for (i = 0; i < hpriv->n_clks; i++) {
-		if (!strcmp(hpriv->clks[i].id, con_id))
+		if (hpriv->clks[i].id && !strcmp(hpriv->clks[i].id, con_id))
 			return hpriv->clks[i].clk;
 	}
@@ -2056,6 +2056,11 @@ static int btusb_setup_csr(struct hci_dev *hdev)

 	rp = (struct hci_rp_read_local_version *)skb->data;

+	bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u",
+		le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev),
+		le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver),
+		le16_to_cpu(rp->manufacturer));
+
 	/* Detect a wide host of Chinese controllers that aren't CSR.
 	 *
 	 * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891

@@ -2118,6 +2123,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 		 * without these the controller will lock up.
 		 */
 		set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+		set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
 		set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
 		set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
@@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev)
 	    !pm_suspend_via_firmware())
 		goto suspended;

-	if (!tpm_chip_start(chip)) {
+	rc = tpm_try_get_ops(chip);
+	if (!rc) {
 		if (chip->flags & TPM_CHIP_FLAG_TPM2)
 			tpm2_shutdown(chip, TPM2_SU_STATE);
 		else
 			rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);

-		tpm_chip_stop(chip);
+		tpm_put_ops(chip);
 	}

 suspended:
@@ -40,7 +40,7 @@ static const struct clk_pll_characteristics rm9200_pll_characteristics = {
 };

 static const struct sck at91rm9200_systemck[] = {
-	{ .n = "udpck", .p = "usbck",    .id = 2 },
+	{ .n = "udpck", .p = "usbck",    .id = 1 },
 	{ .n = "uhpck", .p = "usbck",    .id = 4 },
 	{ .n = "pck0",  .p = "prog0",    .id = 8 },
 	{ .n = "pck1",  .p = "prog1",    .id = 9 },
@@ -5364,6 +5364,8 @@ static struct clk_branch gcc_ufs_1_card_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_ufs_1_card_clkref_clk",
+			.parent_data = &gcc_parent_data_tcxo,
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},

@@ -5432,6 +5434,8 @@ static struct clk_branch gcc_ufs_card_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_ufs_card_clkref_clk",
+			.parent_data = &gcc_parent_data_tcxo,
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},

@@ -5848,6 +5852,8 @@ static struct clk_branch gcc_ufs_ref_clkref_clk = {
 		.enable_mask = BIT(0),
 		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_ufs_ref_clkref_clk",
+			.parent_data = &gcc_parent_data_tcxo,
+			.num_parents = 1,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/ktime.h>
 #include <linux/pm_domain.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset-controller.h>

@@ -56,22 +55,6 @@ enum gdsc_status {
 	GDSC_ON
 };

-static int gdsc_pm_runtime_get(struct gdsc *sc)
-{
-	if (!sc->dev)
-		return 0;
-
-	return pm_runtime_resume_and_get(sc->dev);
-}
-
-static int gdsc_pm_runtime_put(struct gdsc *sc)
-{
-	if (!sc->dev)
-		return 0;
-
-	return pm_runtime_put_sync(sc->dev);
-}
-
 /* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
 static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
 {

@@ -271,8 +254,9 @@ static void gdsc_retain_ff_on(struct gdsc *sc)
 	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
 }

-static int _gdsc_enable(struct gdsc *sc)
+static int gdsc_enable(struct generic_pm_domain *domain)
 {
+	struct gdsc *sc = domain_to_gdsc(domain);
 	int ret;

 	if (sc->pwrsts == PWRSTS_ON)

@@ -328,22 +312,11 @@ static int _gdsc_enable(struct gdsc *sc)
 	return 0;
 }

-static int gdsc_enable(struct generic_pm_domain *domain)
+static int gdsc_disable(struct generic_pm_domain *domain)
 {
 	struct gdsc *sc = domain_to_gdsc(domain);
 	int ret;

-	ret = gdsc_pm_runtime_get(sc);
-	if (ret)
-		return ret;
-
-	return _gdsc_enable(sc);
-}
-
-static int _gdsc_disable(struct gdsc *sc)
-{
-	int ret;
-
 	if (sc->pwrsts == PWRSTS_ON)
 		return gdsc_assert_reset(sc);

@@ -388,18 +361,6 @@ static int _gdsc_disable(struct gdsc *sc)
 	return 0;
 }

-static int gdsc_disable(struct generic_pm_domain *domain)
-{
-	struct gdsc *sc = domain_to_gdsc(domain);
-	int ret;
-
-	ret = _gdsc_disable(sc);
-
-	gdsc_pm_runtime_put(sc);
-
-	return ret;
-}
-
 static int gdsc_init(struct gdsc *sc)
 {
 	u32 mask, val;

@@ -447,11 +408,6 @@ static int gdsc_init(struct gdsc *sc)
 		return ret;
 	}

-	/* ...and the power-domain */
-	ret = gdsc_pm_runtime_get(sc);
-	if (ret)
-		goto err_disable_supply;
-
 	/*
 	 * Votable GDSCs can be ON due to Vote from other masters.
 	 * If a Votable GDSC is ON, make sure we have a Vote.

@@ -459,14 +415,14 @@ static int gdsc_init(struct gdsc *sc)
 	if (sc->flags & VOTABLE) {
 		ret = gdsc_update_collapse_bit(sc, false);
 		if (ret)
-			goto err_put_rpm;
+			goto err_disable_supply;
 	}

 	/* Turn on HW trigger mode if supported */
 	if (sc->flags & HW_CTRL) {
 		ret = gdsc_hwctrl(sc, true);
 		if (ret < 0)
-			goto err_put_rpm;
+			goto err_disable_supply;
 	}

 	/*

@@ -496,13 +452,10 @@ static int gdsc_init(struct gdsc *sc)
 	ret = pm_genpd_init(&sc->pd, NULL, !on);
 	if (ret)
-		goto err_put_rpm;
+		goto err_disable_supply;

 	return 0;

-err_put_rpm:
-	if (on)
-		gdsc_pm_runtime_put(sc);
 err_disable_supply:
 	if (on && sc->rsupply)
 		regulator_disable(sc->rsupply);

@@ -541,8 +494,6 @@ int gdsc_register(struct gdsc_desc *desc,
 	for (i = 0; i < num; i++) {
 		if (!scs[i])
 			continue;
-		if (pm_runtime_enabled(dev))
-			scs[i]->dev = dev;
 		scs[i]->regmap = regmap;
 		scs[i]->rcdev = rcdev;
 		ret = gdsc_init(scs[i]);
@@ -30,7 +30,6 @@ struct reset_controller_dev;
 * @resets: ids of resets associated with this gdsc
 * @reset_count: number of @resets
 * @rcdev: reset controller
- * @dev: the device holding the GDSC, used for pm_runtime calls
 */
 struct gdsc {
 	struct generic_pm_domain pd;

@@ -74,7 +73,6 @@ struct gdsc {

 	const char *supply;
 	struct regulator *rsupply;
-	struct device *dev;
 };

 struct gdsc_desc {
@@ -81,17 +81,19 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
 static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
 {
 	const struct exynos_clkout_variant *variant;
+	const struct of_device_id *match;

 	if (!dev->parent) {
 		dev_err(dev, "not instantiated from MFD\n");
 		return -EINVAL;
 	}

-	variant = of_device_get_match_data(dev->parent);
-	if (!variant) {
+	match = of_match_device(exynos_clkout_ids, dev->parent);
+	if (!match) {
 		dev_err(dev, "cannot match parent device\n");
 		return -EINVAL;
 	}
+	variant = match->data;

 	*mux_mask = variant->mux_mask;
@@ -231,7 +231,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
 	    CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
 	DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
 	    CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
-	DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "fout_shared0_pll",
+	DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
 	    CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
 	DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
 	    CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),

@@ -239,7 +239,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
 	    CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
 	DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
 	    CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
-	DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "fout_shared1_pll",
+	DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
 	    CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),

 	/* CORE */
@@ -51,7 +51,7 @@ static int riscv_clock_next_event(unsigned long delta,
 static unsigned int riscv_clock_event_irq;
 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
 	.name			= "riscv_timer_clockevent",
-	.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
 	.rating			= 100,
 	.set_next_event		= riscv_clock_next_event,
 };
@@ -8,6 +8,13 @@
 static bool nohmem;
 module_param_named(disable, nohmem, bool, 0444);

+static struct resource hmem_active = {
+	.name = "HMEM devices",
+	.start = 0,
+	.end = -1,
+	.flags = IORESOURCE_MEM,
+};
+
 void hmem_register_device(int target_nid, struct resource *r)
 {
 	/* define a clean / non-busy resource for the platform device */

@@ -41,6 +48,12 @@ void hmem_register_device(int target_nid, struct resource *r)
 		goto out_pdev;
 	}

+	if (!__request_region(&hmem_active, res.start, resource_size(&res),
+			      dev_name(&pdev->dev), 0)) {
+		dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
+		goto out_active;
+	}
+
 	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
 	info = (struct memregion_info) {
 		.target_node = target_nid,

@@ -66,6 +79,8 @@ void hmem_register_device(int target_nid, struct resource *r)
 	return;

 out_resource:
+	__release_region(&hmem_active, res.start, resource_size(&res));
+out_active:
 	platform_device_put(pdev);
 out_pdev:
 	memregion_free(id);

@@ -73,15 +88,6 @@ out_pdev:

 static __init int hmem_register_one(struct resource *res, void *data)
 {
-	/*
-	 * If the resource is not a top-level resource it was already
-	 * assigned to a device by the HMAT parsing.
-	 */
-	if (res->parent != &iomem_resource) {
-		pr_info("HMEM: skip %pr, already claimed\n", res);
-		return 0;
-	}
-
 	hmem_register_device(phys_to_target_node(res->start), res);

 	return 0;
@@ -226,7 +226,10 @@ found:
 		ioport_unmap(gp.pm);
 		goto out;
 	}
+	return 0;
+
 out:
+	pci_dev_put(pdev);
 	return err;
 }

@@ -234,6 +237,7 @@ static void __exit amd_gpio_exit(void)
 {
 	gpiochip_remove(&gp.chip);
 	ioport_unmap(gp.pm);
+	pci_dev_put(gp.pdev);
 }

 module_init(amd_gpio_init);
@@ -610,6 +610,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
 		return -ENODATA;

 	pctldev = of_pinctrl_get(pctlnp);
+	of_node_put(pctlnp);
 	if (!pctldev)
 		return -ENODEV;
@@ -526,12 +526,13 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
 	if (ret)
 		return ret;
 
+	/* From this point, the .release() function cleans up gpio_device */
+	gdev->dev.release = gpiodevice_release;
+
 	ret = gpiochip_sysfs_register(gdev);
 	if (ret)
 		goto err_remove_device;
 
-	/* From this point, the .release() function cleans up gpio_device */
-	gdev->dev.release = gpiodevice_release;
-
 	dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
 		gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
 
@@ -597,10 +598,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 	struct fwnode_handle *fwnode = NULL;
 	struct gpio_device *gdev;
 	unsigned long flags;
-	int base = gc->base;
 	unsigned int i;
+	u32 ngpios = 0;
+	int base = 0;
 	int ret = 0;
-	u32 ngpios;
 
 	if (gc->fwnode)
 		fwnode = gc->fwnode;
@@ -647,17 +648,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 	else
 		gdev->owner = THIS_MODULE;
 
-	gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
-	if (!gdev->descs) {
-		ret = -ENOMEM;
-		goto err_free_dev_name;
-	}
-
 	/*
 	 * Try the device properties if the driver didn't supply the number
 	 * of GPIO lines.
 	 */
-	if (gc->ngpio == 0) {
+	ngpios = gc->ngpio;
+	if (ngpios == 0) {
 		ret = device_property_read_u32(&gdev->dev, "ngpios", &ngpios);
 		if (ret == -ENODATA)
 			/*
@@ -668,7 +664,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 			 */
 			ngpios = 0;
 		else if (ret)
-			goto err_free_descs;
+			goto err_free_dev_name;
 
 		gc->ngpio = ngpios;
 	}
@@ -676,13 +672,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 	if (gc->ngpio == 0) {
 		chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
 		ret = -EINVAL;
-		goto err_free_descs;
+		goto err_free_dev_name;
 	}
 
 	if (gc->ngpio > FASTPATH_NGPIO)
 		chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
 			  gc->ngpio, FASTPATH_NGPIO);
 
+	gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL);
+	if (!gdev->descs) {
+		ret = -ENOMEM;
+		goto err_free_dev_name;
+	}
+
 	gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL);
 	if (!gdev->label) {
 		ret = -ENOMEM;
@@ -701,11 +703,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 	 * it may be a pipe dream. It will not happen before we get rid
 	 * of the sysfs interface anyways.
 	 */
+	base = gc->base;
 	if (base < 0) {
 		base = gpiochip_find_base(gc->ngpio);
 		if (base < 0) {
-			ret = base;
 			spin_unlock_irqrestore(&gpio_lock, flags);
+			ret = base;
+			base = 0;
 			goto err_free_label;
 		}
 		/*
@@ -816,6 +820,11 @@ err_remove_of_chip:
 err_free_gpiochip_mask:
 	gpiochip_remove_pin_ranges(gc);
 	gpiochip_free_valid_mask(gc);
+	if (gdev->dev.release) {
+		/* release() has been registered by gpiochip_setup_dev() */
+		put_device(&gdev->dev);
+		goto err_print_message;
+	}
 err_remove_from_list:
 	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
@@ -829,13 +838,14 @@ err_free_dev_name:
 err_free_ida:
 	ida_free(&gpio_ida, gdev->id);
 err_free_gdev:
-	kfree(gdev);
 err_print_message:
 	/* failures here can mean systems won't boot... */
 	if (ret != -EPROBE_DEFER) {
 		pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
-		       gdev->base, gdev->base + gdev->ngpio - 1,
+		       base, base + (int)ngpios - 1,
 		       gc->label ? : "generic", ret);
 	}
+	kfree(gdev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
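
The relocated kfree() and the new put_device() branch above follow the usual kernel unwind pattern: each error label releases exactly what was acquired before the failure point, in reverse order of acquisition. A minimal user-space sketch of that goto-ladder pattern (all names invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire A, then B, then C; unwind in reverse order on failure. */
    static int setup_all(void)
    {
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
            goto err_out;

        b = malloc(16);
        if (!b)
            goto err_free_a;

        c = malloc(16);
        if (!c)
            goto err_free_b;

        printf("all resources acquired\n");
        free(c);
        free(b);
        free(a);
        return 0;

    err_free_b:
        free(b);
    err_free_a:
        free(a);
    err_out:
        return -1;
    }

    int main(void)
    {
        return setup_all() ? 1 : 0;
    }
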
@@ -156,6 +156,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		break;
 	case IP_VERSION(3, 0, 2):
 		fw_name = FIRMWARE_VANGOGH;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
 		break;
 	case IP_VERSION(3, 0, 16):
 		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
@@ -5,6 +5,7 @@ menu "Display Engine Configuration"
 config DRM_AMD_DC
 	bool "AMD DC - Enable new display engine"
 	default y
+	depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
 	select SND_HDA_COMPONENT if SND_HDA_CORE
 	select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128)
 	help
@@ -12,6 +13,12 @@ config DRM_AMD_DC
 	  support for AMDGPU. This adds required support for Vega and
 	  Raven ASICs.
 
+	  calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64)
+	  architectures built with Clang (all released versions), whereby the stack
+	  frame gets blown up to well over 5k. This would cause an immediate kernel
+	  panic on most architectures. We'll revert this when the following bug report
+	  has been resolved: https://github.com/llvm/llvm-project/issues/41896.
+
 config DRM_AMD_DC_DCN
 	def_bool n
 	help
@@ -3723,12 +3723,16 @@ out:
 
 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
 {
+	u8 pipes;
+
 	if (DISPLAY_VER(i915) >= 12)
-		return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+		pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
 	else if (DISPLAY_VER(i915) >= 11)
-		return BIT(PIPE_B) | BIT(PIPE_C);
+		pipes = BIT(PIPE_B) | BIT(PIPE_C);
 	else
-		return 0;
+		pipes = 0;
+
+	return pipes & RUNTIME_INFO(i915)->pipe_mask;
 }
 
 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
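
The bigjoiner_pipes() rewrite first builds the candidate pipe set for the display generation and only then intersects it with the pipes actually present on this part. A toy version of the same mask-and-filter idea, with made-up generation numbers and mask values:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

    /* Candidate pipes per "generation", filtered by a runtime mask. */
    static unsigned int bigjoiner_pipes(int ver, unsigned int runtime_pipe_mask)
    {
        unsigned int pipes;

        if (ver >= 12)
            pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
        else if (ver >= 11)
            pipes = BIT(PIPE_B) | BIT(PIPE_C);
        else
            pipes = 0;

        return pipes & runtime_pipe_mask;
    }

    int main(void)
    {
        /* Gen 12 part with pipe D fused off: mask covers A..C only. */
        printf("0x%x\n", bigjoiner_pipes(12, BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C)));
        return 0;
    }
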
@@ -625,8 +625,13 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
 		return -EINTR;
 	}
 
-	return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
-							  remaining_timeout);
+	if (timeout)
+		return timeout;
+
+	if (remaining_timeout < 0)
+		remaining_timeout = 0;
+
+	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
 }
 
 int intel_gt_init(struct intel_gt *gt)
@@ -1017,6 +1022,11 @@ static void mmio_invalidate_full(struct intel_gt *gt)
 		if (!i915_mmio_reg_offset(rb.reg))
 			continue;
 
+		if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
+		    engine->class == VIDEO_ENHANCEMENT_CLASS ||
+		    engine->class == COMPUTE_CLASS))
+			rb.bit = _MASKED_BIT_ENABLE(rb.bit);
+
 		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
 		awake |= engine->mask;
 	}
@@ -199,7 +199,7 @@ out_active:	spin_lock(&timelines->lock);
 	if (remaining_timeout)
 		*remaining_timeout = timeout;
 
-	return active_count ? timeout : 0;
+	return active_count ? timeout ?: -ETIME : 0;
 }
 
 static void retire_work_handler(struct work_struct *work)
@@ -471,8 +471,7 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915)
 	u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
 	struct dram_info *dram_info = &i915->dram_info;
 
-	val = REG_FIELD_GET(MTL_DDR_TYPE_MASK, val);
-	switch (val) {
+	switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
 	case 0:
 		dram_info->type = INTEL_DRAM_DDR4;
 		break;
@@ -1315,6 +1315,9 @@ static s32 snto32(__u32 value, unsigned n)
 	if (!value || !n)
 		return 0;
 
+	if (n > 32)
+		n = 32;
+
 	switch (n) {
 	case 8: return ((__s8)value);
 	case 16: return ((__s16)value);
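
snto32() sign-extends an n-bit HID report field to s32, and the added clamp stops a malformed field size (n > 32) from producing an out-of-range shift. A self-contained sketch of the same sign-extension logic, written fresh rather than copied from the kernel helper:

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the low n bits of value. */
    static int32_t snto32(uint32_t value, unsigned int n)
    {
        if (!value || !n)
            return 0;

        if (n > 32)
            n = 32;          /* clamp malformed field sizes */

        if (n == 32)
            return (int32_t)value;

        /* If the sign bit (bit n-1) is set, extend ones above it. */
        if (value & (1u << (n - 1)))
            return (int32_t)(value | ~((1u << n) - 1));

        return (int32_t)(value & ((1u << n) - 1));
    }

    int main(void)
    {
        printf("%d\n", snto32(0xff, 8));    /* -1 */
        printf("%d\n", snto32(0x7f, 8));    /* 127 */
        printf("%d\n", snto32(0xffff, 16)); /* -1 */
        return 0;
    }
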
@@ -274,6 +274,7 @@
 #define USB_DEVICE_ID_CH_AXIS_295	0x001c
 
 #define USB_VENDOR_ID_CHERRY		0x046a
+#define USB_DEVICE_ID_CHERRY_MOUSE_000C	0x000c
 #define USB_DEVICE_ID_CHERRY_CYMOTION	0x0023
 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR	0x0027
 
@@ -917,6 +918,7 @@
 #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER	0x02fd
 #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
+#define USB_DEVICE_ID_MS_MOUSE_0783 0x0783
 
 #define USB_VENDOR_ID_MOJO		0x8282
 #define USB_DEVICE_ID_RETRO_ADAPTER	0x3201
@@ -1215,6 +1217,7 @@
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K15A	0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002	0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003	0x73f5
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017	0x73f6
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5	0x81a7
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS	0x2047
@@ -1381,6 +1384,7 @@
 
 #define USB_VENDOR_ID_PRIMAX	0x0461
 #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22	0x4d22
+#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A	0x4e2a
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD	0x4e05
 #define USB_DEVICE_ID_PRIMAX_REZEL	0x4e72
 #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F	0x4d0f
@@ -121,6 +121,11 @@ static const struct hid_device_id ite_devices[] = {
 		     USB_VENDOR_ID_SYNAPTICS,
 		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
 	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+		     USB_VENDOR_ID_SYNAPTICS,
+		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017),
+	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
@@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
 		return -ENOMEM;
 
 	i = strlen(lbuf);
+
+	if (i == 0) {
+		kfree(lbuf);
+		return -EINVAL;
+	}
+
 	if (lbuf[i-1] == '\n') {
 		if (i == 1) {
 			kfree(lbuf);
@@ -4269,21 +4269,6 @@ static void hidpp_remove(struct hid_device *hdev)
 	mutex_destroy(&hidpp->send_mutex);
 }
 
-static const struct hid_device_id unhandled_hidpp_devices[] = {
-	/* Logitech Harmony Adapter for PS3, handled in hid-sony */
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
-	/* Handled in hid-generic */
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD) },
-	{}
-};
-
-static bool hidpp_match(struct hid_device *hdev,
-			bool ignore_special_driver)
-{
-	/* Refuse to handle devices handled by other HID drivers */
-	return !hid_match_id(hdev, unhandled_hidpp_devices);
-}
-
 #define LDJ_DEVICE(product) \
 	HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
 		   USB_VENDOR_ID_LOGITECH, (product))
@@ -4367,9 +4352,15 @@ static const struct hid_device_id hidpp_devices[] = {
 	{ /* MX5500 keyboard over Bluetooth */
 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
-
-	{ /* And try to enable HID++ for all the Logitech Bluetooth devices */
-	  HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, USB_VENDOR_ID_LOGITECH, HID_ANY_ID) },
+	{ /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+	{ /* MX Master mouse over Bluetooth */
+	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
+	{ /* MX Ergo trackball over Bluetooth */
+	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+	{ /* MX Master 3 mouse over Bluetooth */
+	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
 	{}
 };
 
@@ -4383,7 +4374,6 @@ static const struct hid_usage_id hidpp_usages[] = {
 static struct hid_driver hidpp_driver = {
 	.name = "logitech-hidpp-device",
 	.id_table = hidpp_devices,
-	.match = hidpp_match,
 	.report_fixup = hidpp_report_fixup,
 	.probe = hidpp_probe,
 	.remove = hidpp_remove,
@@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
@@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
@@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
@@ -192,6 +192,7 @@ static int uclogic_probe(struct hid_device *hdev,
 	 * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
 	 */
 	hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+	hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE;
 
 	/* Allocate and assign driver data */
 	drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
@@ -1193,7 +1193,7 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
 			   p[sizeof(btn_head)] < param_num) {
 			v = param_list[p[sizeof(btn_head)]];
 			put_unaligned((__u8)0x2A, p); /* Usage Maximum */
-			put_unaligned_le16((__force u16)cpu_to_le16(v), p + 1);
+			put_unaligned((__force u16)cpu_to_le16(v), (s16 *)(p + 1));
 			p += sizeof(btn_head) + 1;
 		} else {
 			p++;
@@ -66,6 +66,6 @@ endmenu
 
 config I2C_HID_CORE
 	tristate
-	default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_GOODIX=y
-	default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_GOODIX=m
+	default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_ELAN=y || I2C_HID_OF_GOODIX=y
+	default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_ELAN=m || I2C_HID_OF_GOODIX=m
 	select HID
@@ -938,6 +938,8 @@ static int asus_ec_probe(struct platform_device *pdev)
 	ec_data->nr_sensors = hweight_long(ec_data->board_info->sensors);
 	ec_data->sensors = devm_kcalloc(dev, ec_data->nr_sensors,
 					sizeof(struct ec_sensor), GFP_KERNEL);
+	if (!ec_data->sensors)
+		return -ENOMEM;
 
 	status = setup_lock_data(dev);
 	if (status) {
@@ -242,10 +242,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 	 */
 	if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
 		for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
-			if (host_bridge->device == tjmax_pci_table[i].device)
+			if (host_bridge->device == tjmax_pci_table[i].device) {
+				pci_dev_put(host_bridge);
 				return tjmax_pci_table[i].tjmax;
+			}
 		}
 	}
+	pci_dev_put(host_bridge);
 
 	for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
 		if (strstr(c->x86_model_id, tjmax_table[i].id))
@@ -533,6 +536,10 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
 {
 	struct temp_data *tdata = pdata->core_data[indx];
 
+	/* if we errored on add then this is already gone */
+	if (!tdata)
+		return;
+
 	/* Remove the sysfs attributes */
 	sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);
 
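
The coretemp hunk adds pci_dev_put() on every path out of the lookup, because pci_get_*() helpers return a reference-counted device that must be dropped on all exits, including early returns. A toy refcount illustrating the invariant (struct ref stands in for struct pci_dev; all names invented):

    #include <stdio.h>

    struct ref { int count; };

    static struct ref *ref_get(struct ref *r) { if (r) r->count++; return r; }
    static void ref_put(struct ref *r)        { if (r) r->count--; }

    /* Every exit path must drop the reference taken by the lookup. */
    static int lookup_and_use(struct ref *dev, int match)
    {
        ref_get(dev);            /* stands in for pci_get_*() */

        if (match) {
            ref_put(dev);        /* early return: still must put */
            return 1;
        }
        ref_put(dev);
        return 0;
    }

    int main(void)
    {
        struct ref dev = { .count = 0 };

        lookup_and_use(&dev, 1);
        lookup_and_use(&dev, 0);
        printf("leaked refs: %d\n", dev.count); /* 0 */
        return 0;
    }
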
@@ -117,7 +117,7 @@ static int i5500_temp_probe(struct pci_dev *pdev,
 	u32 tstimer;
 	s8 tsfsc;
 
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to enable device\n");
 		return err;
@@ -502,6 +502,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev)
 	return;
 
 out_register:
+	list_del(&data->list);
 	hwmon_device_unregister(data->hwmon_dev);
 out_user:
 	ipmi_destroy_user(data->user);
@@ -228,7 +228,7 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
 	 * Shunt Voltage Sum register has 14-bit value with 1-bit shift
 	 * Other Shunt Voltage registers have 12 bits with 3-bit shift
 	 */
-	if (reg == INA3221_SHUNT_SUM)
+	if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM)
 		*val = sign_extend32(regval >> 1, 14);
 	else
 		*val = sign_extend32(regval >> 3, 12);
@@ -465,7 +465,7 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
 	 * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
 	 * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
 	 */
-	if (reg == INA3221_SHUNT_SUM)
+	if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM)
 		regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
 	else
 		regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
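
Both INA3221 branches decode a left-aligned two's-complement register field: shift out the unused low bits, then sign-extend from the field's sign-bit position. A standalone equivalent of the kernel's sign_extend32() with the two shift/width combinations from this hunk (the register values are invented examples):

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend a 32-bit value from sign bit position 'index' (0-based). */
    static int32_t sign_extend32(uint32_t value, int index)
    {
        uint8_t shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
        uint32_t sum_reg  = 0xfffe; /* 1-bit shift, sign bit at position 14 */
        uint32_t chan_reg = 0xfff8; /* 3-bit shift, sign bit at position 12 */

        printf("%d\n", sign_extend32(sum_reg  >> 1, 14)); /* -1 */
        printf("%d\n", sign_extend32(chan_reg >> 3, 12)); /* -1 */
        return 0;
    }
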
@@ -396,7 +396,7 @@ static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
 		return ret;
 
 	/* in milidegrees celcius, temp is given by: */
-	*val = (__val * 204) + 550;
+	*val = (__val * 204) + 5500;
 
 	return 0;
 }
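
Reading the constants off the fixed line, the conversion is temp[mC] = raw * 204 + 5500, i.e. 0.204 degC per LSB with a 5.5 degC offset; the old +550 under-scaled that offset by a factor of ten. A worked check of the arithmetic:

    #include <stdio.h>

    /* Millidegrees Celsius: 0.204 C/LSB scale, 5.5 C offset. */
    static long ltc2947_temp_mdeg(long raw)
    {
        return raw * 204 + 5500;
    }

    int main(void)
    {
        printf("%ld\n", ltc2947_temp_mdeg(100)); /* 25900 -> 25.9 C */
        return 0;
    }
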
@@ -852,7 +852,8 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 				     CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
 	if (ret) {
 		ret = -EAGAIN;
-		i2c_recover_bus(adap);
+		if (id->adap.bus_recovery_info)
+			i2c_recover_bus(adap);
 		goto out;
 	}
 
@@ -1263,8 +1264,13 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 
 	id->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR(id->rinfo.pinctrl)) {
+		int err = PTR_ERR(id->rinfo.pinctrl);
+
 		dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
-		return PTR_ERR(id->rinfo.pinctrl);
+		if (err != -ENODEV)
+			return err;
+	} else {
+		id->adap.bus_recovery_info = &id->rinfo;
 	}
 
 	id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
@@ -1283,7 +1289,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 	id->adap.retries = 3;		/* Default retry value. */
 	id->adap.algo_data = id;
 	id->adap.dev.parent = &pdev->dev;
-	id->adap.bus_recovery_info = &id->rinfo;
 	init_completion(&id->xfer_done);
 	snprintf(id->adap.name, sizeof(id->adap.name),
 		 "Cadence I2C at %08lx", (unsigned long)r_mem->start);
@@ -1132,7 +1132,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
 	int i, result;
 	unsigned int temp;
 	int block_data = msgs->flags & I2C_M_RECV_LEN;
-	int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
+	int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE &&
+		msgs->len >= DMA_THRESHOLD && !block_data;
 
 	dev_dbg(&i2c_imx->adapter.dev,
 		"<%s> write slave address: addr=0x%x\n",
@@ -1298,7 +1299,8 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
 			result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic);
 		} else {
 			if (!atomic &&
-			    i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
+			    i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD &&
+			    msgs[i].flags & I2C_M_DMA_SAFE)
 				result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
 			else
 				result = i2c_imx_write(i2c_imx, &msgs[i], atomic);
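
The new use_dma condition chains a bit test into a logical expression: since `&` binds tighter than `&&`, `i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE` parses as `i2c_imx->dma && (msgs->flags & I2C_M_DMA_SAFE)`, so only buffers explicitly marked DMA-safe go to the DMA path. A short demonstration (the flag value is illustrative, quoted from memory of the uapi header):

    #include <stdio.h>

    #define I2C_M_DMA_SAFE 0x0200  /* illustrative value */

    int main(void)
    {
        unsigned int flags = I2C_M_DMA_SAFE | 0x0001;
        int have_dma = 1;

        /* Parsed as: have_dma && (flags & I2C_M_DMA_SAFE) */
        if (have_dma && flags & I2C_M_DMA_SAFE)
            printf("buffer may be handed to the DMA engine\n");
        else
            printf("fall back to PIO\n");
        return 0;
    }
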
@@ -2393,8 +2393,17 @@ static struct platform_driver npcm_i2c_bus_driver = {
 
 static int __init npcm_i2c_init(void)
 {
+	int ret;
+
 	npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
-	return platform_driver_register(&npcm_i2c_bus_driver);
+
+	ret = platform_driver_register(&npcm_i2c_bus_driver);
+	if (ret) {
+		debugfs_remove_recursive(npcm_i2c_debugfs_dir);
+		return ret;
+	}
+
+	return 0;
 }
 module_init(npcm_i2c_init);
 
@@ -626,7 +626,6 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
 		dev_err(gi2c->se.dev, "I2C timeout gpi flags:%d addr:0x%x\n",
 			gi2c->cur->flags, gi2c->cur->addr);
 		gi2c->err = -ETIMEDOUT;
-		goto err;
 	}
 
 	if (gi2c->err) {
@@ -467,6 +467,7 @@ static int i2c_device_probe(struct device *dev)
 {
 	struct i2c_client	*client = i2c_verify_client(dev);
 	struct i2c_driver	*driver;
+	bool do_power_on;
 	int status;
 
 	if (!client)
@@ -545,8 +546,8 @@ static int i2c_device_probe(struct device *dev)
 	if (status < 0)
 		goto err_clear_wakeup_irq;
 
-	status = dev_pm_domain_attach(&client->dev,
-				      !i2c_acpi_waive_d0_probe(dev));
+	do_power_on = !i2c_acpi_waive_d0_probe(dev);
+	status = dev_pm_domain_attach(&client->dev, do_power_on);
 	if (status)
 		goto err_clear_wakeup_irq;
 
@@ -585,7 +586,7 @@ static int i2c_device_probe(struct device *dev)
 err_release_driver_resources:
 	devres_release_group(&client->dev, client->devres_group_id);
 err_detach_pm_domain:
-	dev_pm_domain_detach(&client->dev, !i2c_acpi_waive_d0_probe(dev));
+	dev_pm_domain_detach(&client->dev, do_power_on);
 err_clear_wakeup_irq:
 	dev_pm_clear_wake_irq(&client->dev);
 	device_init_wakeup(&client->dev, false);
@@ -610,7 +611,7 @@ static void i2c_device_remove(struct device *dev)
 
 	devres_release_group(&client->dev, client->devres_group_id);
 
-	dev_pm_domain_detach(&client->dev, !i2c_acpi_waive_d0_probe(dev));
+	dev_pm_domain_detach(&client->dev, true);
 
 	dev_pm_clear_wake_irq(&client->dev);
 	device_init_wakeup(&client->dev, false);
@@ -211,12 +211,14 @@ static int raydium_i2c_send(struct i2c_client *client,
 
 		error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer));
 		if (likely(!error))
-			return 0;
+			goto out;
 
 		msleep(RM_RETRY_DELAY_MS);
 	} while (++tries < RM_MAX_RETRIES);
 
 	dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+out:
 	kfree(tx_buf);
 	return error;
 }
@@ -820,6 +820,7 @@ int __init dmar_dev_scope_init(void)
 			info = dmar_alloc_pci_notify_info(dev,
 					BUS_NOTIFY_ADD_DEVICE);
 			if (!info) {
+				pci_dev_put(dev);
 				return dmar_dev_scope_status;
 			} else {
 				dmar_pci_bus_add_dev(info);
@@ -1396,6 +1396,24 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+/*
+ * The extra devTLB flush quirk impacts those QAT devices with PCI device
+ * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
+ * check because it applies only to the built-in QAT devices and it doesn't
+ * grant additional privileges.
+ */
+#define BUGGY_QAT_DEVID_MASK 0x4940
+static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
+{
+	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+		return false;
+
+	if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
+		return false;
+
+	return true;
+}
+
 static void iommu_enable_pci_caps(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
@@ -1478,6 +1496,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 	qdep = info->ats_qdep;
 	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
 			   qdep, addr, mask);
+	quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -3854,8 +3873,10 @@ static inline bool has_external_pci(void)
 	struct pci_dev *pdev = NULL;
 
 	for_each_pci_dev(pdev)
-		if (pdev->external_facing)
+		if (pdev->external_facing) {
+			pci_dev_put(pdev);
 			return true;
+		}
 
 	return false;
 }
@@ -4490,9 +4511,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 	if (dev_is_pci(dev)) {
 		if (ecap_dev_iotlb_support(iommu->ecap) &&
 		    pci_ats_supported(pdev) &&
-		    dmar_ats_supported(pdev, iommu))
+		    dmar_ats_supported(pdev, iommu)) {
 			info->ats_supported = 1;
-
+			info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
+		}
 		if (sm_supported(iommu)) {
 			if (pasid_supported(iommu)) {
 				int features = pci_pasid_features(pdev);
@@ -4931,3 +4953,48 @@ static void __init check_tylersburg_isoch(void)
 	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
 	       vtisochctrl);
 }
+
+/*
+ * Here we deal with a device TLB defect where device may inadvertently issue ATS
+ * invalidation completion before posted writes initiated with translated address
+ * that utilized translations matching the invalidation address range, violating
+ * the invalidation completion ordering.
+ * Therefore, any use cases that cannot guarantee DMA is stopped before unmap is
+ * vulnerable to this defect. In other words, any dTLB invalidation initiated not
+ * under the control of the trusted/privileged host device driver must use this
+ * quirk.
+ * Device TLBs are invalidated under the following six conditions:
+ * 1. Device driver does DMA API unmap IOVA
+ * 2. Device driver unbind a PASID from a process, sva_unbind_device()
+ * 3. PASID is torn down, after PASID cache is flushed. e.g. process
+ *    exit_mmap() due to crash
+ * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
+ *    VM has to free pages that were unmapped
+ * 5. Userspace driver unmaps a DMA buffer
+ * 6. Cache invalidation in vSVA usage (upcoming)
+ *
+ * For #1 and #2, device drivers are responsible for stopping DMA traffic
+ * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
+ * invalidate TLB the same way as normal user unmap which will use this quirk.
+ * The dTLB invalidation after PASID cache flush does not need this quirk.
+ *
+ * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
+ */
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+			       unsigned long address, unsigned long mask,
+			       u32 pasid, u16 qdep)
+{
+	u16 sid;
+
+	if (likely(!info->dtlb_extra_inval))
+		return;
+
+	sid = PCI_DEVID(info->bus, info->devfn);
+	if (pasid == PASID_RID2PASID) {
+		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+				   qdep, address, mask);
+	} else {
+		qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
+					 pasid, qdep, address, mask);
+	}
+}
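
dev_needs_extra_dtlb_flush() matches the four affected QAT device IDs with a single comparison: masking off the two low bits maps 0x4940-0x4943 onto one value. A standalone check of the same logic (PCI structs reduced to plain integers; only the mask trick is being demonstrated):

    #include <stdio.h>

    #define PCI_VENDOR_ID_INTEL  0x8086
    #define BUGGY_QAT_DEVID_MASK 0x4940

    static int dev_needs_extra_dtlb_flush(unsigned short vendor, unsigned short device)
    {
        if (vendor != PCI_VENDOR_ID_INTEL)
            return 0;
        /* device & 0xfffc drops bits 0-1: matches 0x4940..0x4943 */
        return (device & 0xfffc) == BUGGY_QAT_DEVID_MASK;
    }

    int main(void)
    {
        for (unsigned short d = 0x493e; d <= 0x4945; d++)
            printf("0x%04x -> %d\n", d, dev_needs_extra_dtlb_flush(0x8086, d));
        return 0;
    }
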
@@ -623,6 +623,7 @@ struct device_domain_info {
 	u8 pri_enabled:1;
 	u8 ats_supported:1;
 	u8 ats_enabled:1;
+	u8 dtlb_extra_inval:1;	/* Quirk for devices need extra flush */
 	u8 ats_qdep;
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
@@ -728,6 +729,9 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			      u32 pasid, u16 qdep, u64 addr,
 			      unsigned int size_order);
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+			       unsigned long address, unsigned long pages,
+			       u32 pasid, u16 qdep);
 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
 			  u32 pasid);
 
|
@ -184,10 +184,13 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
|
||||
return;
|
||||
|
||||
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
|
||||
if (info->ats_enabled)
|
||||
if (info->ats_enabled) {
|
||||
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
|
||||
svm->pasid, sdev->qdep, address,
|
||||
order_base_2(pages));
|
||||
quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
|
||||
svm->pasid, sdev->qdep);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_flush_svm_range_dev(struct intel_svm *svm,
|
||||
@ -745,12 +748,16 @@ bad_req:
|
||||
* If prq is to be handled outside iommu driver via receiver of
|
||||
* the fault notifiers, we skip the page response here.
|
||||
*/
|
||||
if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
|
||||
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
|
||||
if (!pdev)
|
||||
goto bad_req;
|
||||
|
||||
trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
|
||||
req->priv_data[0], req->priv_data[1],
|
||||
iommu->prq_seq_number++);
|
||||
if (intel_svm_prq_report(iommu, &pdev->dev, req))
|
||||
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
|
||||
else
|
||||
trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
|
||||
req->priv_data[0], req->priv_data[1],
|
||||
iommu->prq_seq_number++);
|
||||
pci_dev_put(pdev);
|
||||
prq_advance:
|
||||
head = (head + sizeof(*req)) & PRQ_RING_MASK;
|
||||
}
|
||||
|
@@ -35,11 +35,7 @@
 int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 		     struct frame_vector *vec)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	int ret_pin_user_pages_fast = 0;
-	int ret = 0;
-	int err;
+	int ret;
 
 	if (nr_frames == 0)
 		return 0;
@@ -52,57 +48,17 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 	ret = pin_user_pages_fast(start, nr_frames,
 				  FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
 				  (struct page **)(vec->ptrs));
-	if (ret > 0) {
-		vec->got_ref = true;
-		vec->is_pfns = false;
-		goto out_unlocked;
-	}
-	ret_pin_user_pages_fast = ret;
+	vec->got_ref = true;
+	vec->is_pfns = false;
+	vec->nr_frames = ret;
 
-	mmap_read_lock(mm);
-	vec->got_ref = false;
-	vec->is_pfns = true;
-	ret = 0;
-	do {
-		unsigned long *nums = frame_vector_pfns(vec);
+	if (likely(ret > 0))
+		return ret;
 
-		vma = vma_lookup(mm, start);
-		if (!vma)
-			break;
-
-		while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
-			err = follow_pfn(vma, start, &nums[ret]);
-			if (err) {
-				if (ret)
-					goto out;
-				// If follow_pfn() returns -EINVAL, then this
-				// is not an IO mapping or a raw PFN mapping.
-				// In that case, return the original error from
-				// pin_user_pages_fast(). Otherwise this
-				// function would return -EINVAL when
-				// pin_user_pages_fast() returned -ENOMEM,
-				// which makes debugging hard.
-				if (err == -EINVAL && ret_pin_user_pages_fast)
-					ret = ret_pin_user_pages_fast;
-				else
-					ret = err;
-				goto out;
-			}
-			start += PAGE_SIZE;
-			ret++;
-		}
-		/* Bail out if VMA doesn't completely cover the tail page. */
-		if (start < vma->vm_end)
-			break;
-	} while (ret < nr_frames);
-out:
-	mmap_read_unlock(mm);
-out_unlocked:
-	if (!ret)
-		ret = -EFAULT;
-	if (ret > 0)
-		vec->nr_frames = ret;
-	return ret;
+	/* This used to (racily) return non-refcounted pfns. Let people know */
+	WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
+	vec->nr_frames = 0;
+	return ret ? ret : -EFAULT;
 }
 EXPORT_SYMBOL(get_vaddr_frames);
 
@@ -813,7 +813,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
 	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
 	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+	/*
+	 * Set this now to ensure that drivers see the correct q->memory value
+	 * in the queue_setup op.
+	 */
+	mutex_lock(&q->mmap_lock);
 	q->memory = memory;
+	mutex_unlock(&q->mmap_lock);
 	set_queue_coherency(q, non_coherent_mem);
 
 	/*
@@ -823,22 +829,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
 		       plane_sizes, q->alloc_devs);
 	if (ret)
-		return ret;
+		goto error;
 
 	/* Check that driver has set sane values */
-	if (WARN_ON(!num_planes))
-		return -EINVAL;
+	if (WARN_ON(!num_planes)) {
+		ret = -EINVAL;
+		goto error;
+	}
 
 	for (i = 0; i < num_planes; i++)
-		if (WARN_ON(!plane_sizes[i]))
-			return -EINVAL;
+		if (WARN_ON(!plane_sizes[i])) {
+			ret = -EINVAL;
+			goto error;
+		}
 
 	/* Finally, allocate buffers and video memory */
 	allocated_buffers =
 		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
 	if (allocated_buffers == 0) {
 		dprintk(q, 1, "memory allocation failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto error;
 	}
 
 	/*
@@ -879,7 +890,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 	if (ret < 0) {
 		/*
 		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
-		 * from q->num_buffers.
+		 * from q->num_buffers and it will reset q->memory to
+		 * VB2_MEMORY_UNKNOWN.
 		 */
 		__vb2_queue_free(q, allocated_buffers);
 		mutex_unlock(&q->mmap_lock);
@@ -895,6 +907,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
 	q->waiting_for_buffers = !q->is_output;
 
 	return 0;
+
+error:
+	mutex_lock(&q->mmap_lock);
+	q->memory = VB2_MEMORY_UNKNOWN;
+	mutex_unlock(&q->mmap_lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
 
@@ -906,6 +924,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 	unsigned int num_planes = 0, num_buffers, allocated_buffers;
 	unsigned plane_sizes[VB2_MAX_PLANES] = { };
 	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
+	bool no_previous_buffers = !q->num_buffers;
 	int ret;
 
 	if (q->num_buffers == VB2_MAX_FRAME) {
@@ -913,13 +932,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 		return -ENOBUFS;
 	}
 
-	if (!q->num_buffers) {
+	if (no_previous_buffers) {
 		if (q->waiting_in_dqbuf && *count) {
 			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
 			return -EBUSY;
 		}
 		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+		/*
+		 * Set this now to ensure that drivers see the correct q->memory
+		 * value in the queue_setup op.
+		 */
+		mutex_lock(&q->mmap_lock);
 		q->memory = memory;
+		mutex_unlock(&q->mmap_lock);
 		q->waiting_for_buffers = !q->is_output;
 		set_queue_coherency(q, non_coherent_mem);
 	} else {
@@ -945,14 +970,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 	ret = call_qop(q, queue_setup, q, &num_buffers,
 		       &num_planes, plane_sizes, q->alloc_devs);
 	if (ret)
-		return ret;
+		goto error;
 
 	/* Finally, allocate buffers and video memory */
 	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
 				num_planes, plane_sizes);
 	if (allocated_buffers == 0) {
 		dprintk(q, 1, "memory allocation failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto error;
 	}
 
 	/*
@@ -983,7 +1009,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 	if (ret < 0) {
 		/*
 		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
-		 * from q->num_buffers.
+		 * from q->num_buffers and it will reset q->memory to
+		 * VB2_MEMORY_UNKNOWN.
		 */
 		__vb2_queue_free(q, allocated_buffers);
 		mutex_unlock(&q->mmap_lock);
@@ -998,6 +1025,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
 	*count = allocated_buffers;
 
 	return 0;
+
+error:
+	if (no_previous_buffers) {
+		mutex_lock(&q->mmap_lock);
+		q->memory = VB2_MEMORY_UNKNOWN;
+		mutex_unlock(&q->mmap_lock);
+	}
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
 
@@ -2164,6 +2199,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
 	struct vb2_buffer *vb;
 	unsigned int buffer, plane;
 
+	/*
+	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
+	 * used and fileio isn't active.
+	 */
+	lockdep_assert_held(&q->mmap_lock);
+
+	if (q->memory != VB2_MEMORY_MMAP) {
+		dprintk(q, 1, "queue is not currently set up for mmap\n");
+		return -EINVAL;
+	}
+
+	if (vb2_fileio_is_active(q)) {
+		dprintk(q, 1, "file io in progress\n");
+		return -EBUSY;
+	}
+
 	/*
 	 * Go over all buffers and their planes, comparing the given offset
 	 * with an offset assigned to each plane. If a match is found,
@@ -2265,11 +2316,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 	int ret;
 	unsigned long length;
 
-	if (q->memory != VB2_MEMORY_MMAP) {
-		dprintk(q, 1, "queue is not currently set up for mmap\n");
-		return -EINVAL;
-	}
-
 	/*
 	 * Check memory area access mode.
 	 */
@@ -2291,14 +2337,9 @@
 
 	mutex_lock(&q->mmap_lock);
 
-	if (vb2_fileio_is_active(q)) {
-		dprintk(q, 1, "mmap: file io in progress\n");
-		ret = -EBUSY;
-		goto unlock;
-	}
-
 	/*
-	 * Find the plane corresponding to the offset passed by userspace.
+	 * Find the plane corresponding to the offset passed by userspace. This
+	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
 	 */
 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
 	if (ret)
@@ -2351,22 +2392,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
 	void *vaddr;
 	int ret;
 
-	if (q->memory != VB2_MEMORY_MMAP) {
-		dprintk(q, 1, "queue is not currently set up for mmap\n");
-		return -EINVAL;
-	}
+	mutex_lock(&q->mmap_lock);
 
 	/*
-	 * Find the plane corresponding to the offset passed by userspace.
+	 * Find the plane corresponding to the offset passed by userspace. This
+	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
 	 */
 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	vb = q->bufs[buffer];
 
 	vaddr = vb2_plane_vaddr(vb, plane);
+	mutex_unlock(&q->mmap_lock);
 	return vaddr ? (unsigned long)vaddr : -EINVAL;
+
+unlock:
+	mutex_unlock(&q->mmap_lock);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
 #endif
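
__find_plane_by_offset() now owns the MEMORY_MMAP and fileio checks and states its locking contract with lockdep_assert_held(): callers take q->mmap_lock, the helper asserts it. lockdep is kernel-only, so this user-space approximation tracks held-ness with an explicit flag, purely to illustrate the contract:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct queue {
        pthread_mutex_t lock;
        int lock_held;     /* stand-in for lockdep's ownership tracking */
        int memory_mmap;
    };

    /* Helper with a locking contract: caller must hold q->lock. */
    static int find_plane(struct queue *q)
    {
        assert(q->lock_held);  /* ~ lockdep_assert_held(&q->mmap_lock) */
        if (!q->memory_mmap)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

        pthread_mutex_lock(&q.lock);
        q.lock_held = 1;
        printf("find_plane: %d\n", find_plane(&q));
        q.lock_held = 0;
        pthread_mutex_unlock(&q.lock);
        return 0;
    }
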
@@ -1484,6 +1484,11 @@ void mmc_init_erase(struct mmc_card *card)
 		card->pref_erase = 0;
 }
 
+static bool is_trim_arg(unsigned int arg)
+{
+	return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
+}
+
 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
 				          unsigned int arg, unsigned int qty)
 {
@@ -1766,7 +1771,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
 		return -EOPNOTSUPP;
 
-	if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
+	if (mmc_card_mmc(card) && is_trim_arg(arg) &&
 	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
 		return -EOPNOTSUPP;
 
@@ -1796,7 +1801,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
 	 * identified by the card->eg_boundary flag.
 	 */
 	rem = card->erase_size - (from % card->erase_size);
-	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+	if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
 		err = mmc_do_erase(card, from, from + rem - 1, arg);
 		from += rem;
 		if ((err) || (to <= from))
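
The new is_trim_arg() helper gives a name to "a trim/discard-class argument other than plain discard", which the call sites previously open-coded against different masks. A reduced sketch of the same set-membership test, with invented stand-in values for the MMC argument encodings:

    #include <stdio.h>

    /* Illustrative stand-ins for the MMC erase argument encodings. */
    #define MMC_ERASE_ARG    0x00000000
    #define MMC_TRIM_ARG     0x00000001
    #define MMC_DISCARD_ARG  0x00000003
    #define MMC_TRIM_OR_DISCARD_ARGS (MMC_TRIM_ARG | MMC_DISCARD_ARG)

    static int is_trim_arg(unsigned int arg)
    {
        return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
    }

    int main(void)
    {
        printf("erase:   %d\n", is_trim_arg(MMC_ERASE_ARG));   /* 0 */
        printf("trim:    %d\n", is_trim_arg(MMC_TRIM_ARG));    /* 1 */
        printf("discard: %d\n", is_trim_arg(MMC_DISCARD_ARG)); /* 0 */
        return 0;
    }
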
@@ -3179,7 +3179,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
 	struct mmc_test_dbgfs_file *df;
 
 	if (card->debugfs_root)
-		debugfs_create_file(name, mode, card->debugfs_root, card, fops);
+		file = debugfs_create_file(name, mode, card->debugfs_root,
+					   card, fops);
 
 	df = kmalloc(sizeof(*df), GFP_KERNEL);
 	if (!df) {
@@ -2588,13 +2588,11 @@ static int msdc_of_clock_parse(struct platform_device *pdev,
 		return PTR_ERR(host->src_clk_cg);
 	}
 
-	host->sys_clk_cg = devm_clk_get_optional(&pdev->dev, "sys_cg");
+	/* If present, always enable for this clock gate */
+	host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
 	if (IS_ERR(host->sys_clk_cg))
 		host->sys_clk_cg = NULL;
 
-	/* If present, always enable for this clock gate */
-	clk_prepare_enable(host->sys_clk_cg);
-
 	host->bulk_clks[0].id = "pclk_cg";
 	host->bulk_clks[1].id = "axi_cg";
 	host->bulk_clks[2].id = "ahb_cg";
@@ -1512,7 +1512,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc)
 	 * system resume back.
 	 */
 	cqhci_writel(cq_host, 0, CQHCI_CTL);
-	if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT)
+	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
 		dev_err(mmc_dev(host->mmc),
 			"failed to exit halt state when enable CQE\n");
 
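
The one-character esdhc fix swaps a logical && for the intended bitwise &: `readl(...) && CQHCI_HALT` is 1 whenever both operands are nonzero, so the halt bit itself was never actually inspected. A short demonstration of the difference (the CQHCI_HALT value here is illustrative):

    #include <stdio.h>

    #define CQHCI_HALT 0x0001

    int main(void)
    {
        unsigned int ctl = 0x0008;  /* some other bit set, HALT clear */

        printf("ctl && CQHCI_HALT = %d (always 1 if both nonzero)\n",
               ctl && CQHCI_HALT);
        printf("ctl &  CQHCI_HALT = %d (tests the actual bit)\n",
               ctl & CQHCI_HALT);
        return 0;
    }
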
@@ -470,7 +470,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
 	}
 
 	if (IS_ERR(sprd_host->pinctrl))
-		return 0;
+		goto reset;
 
 	switch (ios->signal_voltage) {
 	case MMC_SIGNAL_VOLTAGE_180:
@@ -498,6 +498,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	/* Wait for 300 ~ 500 us for pin state stable */
 	usleep_range(300, 500);
+
+reset:
 	sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
 	return 0;
@@ -373,6 +373,7 @@ static void sdhci_init(struct sdhci_host *host, int soft)
 	if (soft) {
 		/* force clock reconfiguration */
 		host->clock = 0;
+		host->reinit_uhs = true;
 		mmc->ops->set_ios(mmc, &mmc->ios);
 	}
 }
@@ -2293,11 +2294,46 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
 }
 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
 
+static bool sdhci_timing_has_preset(unsigned char timing)
+{
+	switch (timing) {
+	case MMC_TIMING_UHS_SDR12:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_SDR50:
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		return true;
+	};
+	return false;
+}
+
+static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
+{
+	return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+	       sdhci_timing_has_preset(timing);
+}
+
+static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
+{
+	/*
+	 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
+	 * Frequency. Check if preset values need to be enabled, or the Driver
+	 * Strength needs updating. Note, clock changes are handled separately.
+	 */
+	return !host->preset_enabled &&
+	       (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
+}
+
 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
+	bool reinit_uhs = host->reinit_uhs;
+	bool turning_on_clk = false;
 	u8 ctrl;
 
+	host->reinit_uhs = false;
+
 	if (ios->power_mode == MMC_POWER_UNDEFINED)
 		return;
 
@@ -2323,6 +2359,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		sdhci_enable_preset_value(host, false);
 
 	if (!ios->clock || ios->clock != host->clock) {
+		turning_on_clk = ios->clock && !host->clock;
+
 		host->ops->set_clock(host, ios->clock);
 		host->clock = ios->clock;
 
@@ -2349,6 +2387,17 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	host->ops->set_bus_width(host, ios->bus_width);
 
+	/*
+	 * Special case to avoid multiple clock changes during voltage
+	 * switching.
+	 */
+	if (!reinit_uhs &&
+	    turning_on_clk &&
+	    host->timing == ios->timing &&
+	    host->version >= SDHCI_SPEC_300 &&
+	    !sdhci_presetable_values_change(host, ios))
+		return;
+
 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 
 	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
@@ -2392,6 +2441,7 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		}
 
 		sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		host->drv_type = ios->drv_type;
 	} else {
 		/*
 		 * According to SDHC Spec v3.00, if the Preset Value
@@ -2419,19 +2469,14 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	host->ops->set_uhs_signaling(host, ios->timing);
 	host->timing = ios->timing;
 
-	if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
-	    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
-	     (ios->timing == MMC_TIMING_UHS_SDR25) ||
-	     (ios->timing == MMC_TIMING_UHS_SDR50) ||
-	     (ios->timing == MMC_TIMING_UHS_SDR104) ||
-	     (ios->timing == MMC_TIMING_UHS_DDR50) ||
-	     (ios->timing == MMC_TIMING_MMC_DDR52))) {
+	if (sdhci_preset_needed(host, ios->timing)) {
 		u16 preset;
 
 		sdhci_enable_preset_value(host, true);
 		preset = sdhci_get_preset_value(host);
 		ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
 					  preset);
+		host->drv_type = ios->drv_type;
 	}
 
 	/* Re-enable SD Clock */
@@ -3768,6 +3813,7 @@ int sdhci_resume_host(struct sdhci_host *host)
 		sdhci_init(host, 0);
 		host->pwr = 0;
 		host->clock = 0;
+		host->reinit_uhs = true;
 		mmc->ops->set_ios(mmc, &mmc->ios);
 	} else {
 		sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
@@ -3830,6 +3876,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
 	/* Force clock and power re-program */
 	host->pwr = 0;
 	host->clock = 0;
+	host->reinit_uhs = true;
 	mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
 	mmc->ops->set_ios(mmc, &mmc->ios);
 
@@ -524,6 +524,8 @@ struct sdhci_host {
 
 	unsigned int clock;	/* Current clock (MHz) */
 	u8 pwr;			/* Current voltage */
+	u8 drv_type;		/* Current UHS-I driver type */
+	bool reinit_uhs;	/* Force UHS-related re-initialization */
 
 	bool runtime_suspended;	/* Host is runtime suspended */
 	bool bus_on;		/* Bus power prevents runtime suspend */