Linux 5.13-rc7

-----BEGIN PGP SIGNATURE-----

 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmDPuyMeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGvxgH/RKvSuRPwkJ2Jcp9
 VLi5kCbqtJlYLq6tB6peSJ8otKgxkcRwC0pIY4LlYIAWYboktLQ5RKp/9nB2h2FN
 aMZUMu6AI/lVJyFMI5MnKnJIUiUq+WXR3lSSlw68vwFLFdzqUZFNq+bqeiVvnIy1
 yqA6naj24Tu/RbYffQoPvdSJcU2SLXRMxwD8HRGiU2d51RaFsOvsZvF+P5TVcsEV
 ZmttJeER21CaI/A809eqaFmyGrUOcZZK9roZEbMwanTZOMw18biEsLu/UH4kBX01
 JC4+RlGxcWjQ5YNZgChsgoOK/CHzc6ITztTntdeDWAvwZjQFzV7pCy4/3BWne3O+
 5178yHM=
 =o8cN
 -----END PGP SIGNATURE-----

Merge tag 'v5.13-rc7' into usb-next

We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit cfb0276373
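For context, a back-merge like this is normally produced with ordinary git commands; a minimal sketch, assuming the maintainer's local branch is named usb-next and the signed tag v5.13-rc7 has already been fetched from Linus' tree:

    git checkout usb-next
    git merge v5.13-rc7    # creates this merge commit, carrying the rc7 fixes into usb-next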
@@ -58,6 +58,6 @@ RISC-V Linux Kernel SV39
  ____________________________________________________________|____________________________________________________________
                    |            |                  |         |
  ffffffff00000000 | -4    GB | ffffffff7fffffff |    2 GB | modules
  ffffffff80000000 | -2    GB | ffffffffffffffff |    2 GB | kernel, BPF
  ffffffff00000000 | -4    GB | ffffffff7fffffff |    2 GB | modules, BPF
  ffffffff80000000 | -2    GB | ffffffffffffffff |    2 GB | kernel
  __________________|____________|__________________|_________|____________________________________________________________
@@ -181,7 +181,7 @@ SLUB Debug output
 Here is a sample of slub debug output::

  ====================================================================
  BUG kmalloc-8: Redzone overwritten
  BUG kmalloc-8: Right Redzone overwritten
  --------------------------------------------------------------------

  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554

  Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a  ........ZZZZZZZZ
  Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                          1019.005
  Redzone 0xc90f6d28:  00 cc cc cc                                      .
  Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                          ZZZZZZZZ
  Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a  ........ZZZZZZZZ
  Object (0xc90f6d20): 31 30 31 39 2e 30 30 35                          1019.005
  Redzone (0xc90f6d28): 00 cc cc cc                                      .
  Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                          ZZZZZZZZ

  [<c010523d>] dump_trace+0x63/0x1eb
  [<c01053df>] show_trace_log_lvl+0x1a/0x2f
@@ -16560,6 +16560,7 @@ F:	drivers/misc/sgi-xp/

 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:	Karsten Graul <kgraul@linux.ibm.com>
 M:	Guvenc Gulce <guvenc@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 13
 SUBLEVEL = 0
 EXTRAVERSION = -rc6
 NAME = Frozen Wasteland
 EXTRAVERSION = -rc7
 NAME = Opossums on Parade

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -929,11 +929,14 @@ CC_FLAGS_LTO	+= -fvisibility=hidden
 # Limit inlining across translation units to reduce binary size
 KBUILD_LDFLAGS += -mllvm -import-instr-limit=5

 # Check for frame size exceeding threshold during prolog/epilog insertion.
 # Check for frame size exceeding threshold during prolog/epilog insertion
 # when using lld < 13.0.0.
 ifneq ($(CONFIG_FRAME_WARN),0)
 ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
 KBUILD_LDFLAGS	+= -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
 endif
 endif
 endif

 ifdef CONFIG_LTO
 KBUILD_CFLAGS	+= -fno-lto $(CC_FLAGS_LTO)
@@ -18,6 +18,7 @@
  */
 struct sigcontext {
	struct user_regs_struct regs;
	struct user_regs_arcv2 v2abi;
 };

 #endif /* _ASM_ARC_SIGCONTEXT_H */
@@ -61,6 +61,41 @@ struct rt_sigframe {
	unsigned int sigret_magic;
 };

 static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
 {
	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT
	struct user_regs_arcv2 v2abi;

	v2abi.r30 = regs->r30;
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
	v2abi.r58 = regs->r58;
	v2abi.r59 = regs->r59;
 #else
	v2abi.r58 = v2abi.r59 = 0;
 #endif
	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
 #endif
	return err;
 }

 static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
 {
	int err = 0;
 #ifndef CONFIG_ISA_ARCOMPACT
	struct user_regs_arcv2 v2abi;

	err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));

	regs->r30 = v2abi.r30;
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
	regs->r58 = v2abi.r58;
	regs->r59 = v2abi.r59;
 #endif
 #endif
	return err;
 }

 static int
 stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
	       sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,

	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
			     sizeof(sf->uc.uc_mcontext.regs.scratch));

	if (is_isa_arcv2())
		err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));

	return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
	err |= __copy_from_user(&uregs.scratch,
				&(sf->uc.uc_mcontext.regs.scratch),
				sizeof(sf->uc.uc_mcontext.regs.scratch));

	if (is_isa_arcv2())
		err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);

	if (err)
		return -EFAULT;
@@ -57,7 +57,6 @@ SECTIONS
	.init.ramfs : { INIT_RAM_FS }

	. = ALIGN(PAGE_SIZE);
	_stext = .;

	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS

	.text : {
		_text = .;
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
@@ -50,7 +50,7 @@ l_yes:
 1098:	nop;					\
	.pushsection __jump_table, "aw";	\
	.long 1098b - ., LABEL - .;		\
	FTR_ENTRY_LONG KEY;			\
	FTR_ENTRY_LONG KEY - .;			\
	.popsection
 #endif
@@ -902,6 +902,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
	unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
	user_write_access_end();

	/* Save the siginfo outside of the unsafe block. */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;
@@ -915,11 +919,6 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		regs->nip = (unsigned long) &frame->tramp[0];
	}

	/* Save the siginfo outside of the unsafe block. */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);
@@ -20,6 +20,7 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/kasan.h>
 #include <asm/sparsemem.h>
 #include <asm/svm.h>

 #include <mm/mmu_decl.h>
@@ -2254,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
	bool use_siar = regs_use_siar(regs);
	unsigned long siar = mfspr(SPRN_SIAR);

	if (ppmu->flags & PPMU_P10_DD1) {
	if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
		if (siar)
			return siar;
		else
@@ -14,6 +14,7 @@ config SOC_SIFIVE
	select CLK_SIFIVE
	select CLK_SIFIVE_PRCI
	select SIFIVE_PLIC
	select RISCV_ERRATA_ALTERNATIVE
	select ERRATA_SIFIVE
	help
	  This enables support for SiFive SoC platform hardware.
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
	CC_FLAGS_FTRACE := -fpatchable-function-entry=8
 endif

 ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
 ifeq ($(CONFIG_CMODEL_MEDLOW),y)
 KBUILD_CFLAGS_MODULE += -mcmodel=medany
 endif
@@ -273,7 +273,7 @@
		cache-size = <2097152>;
		cache-unified;
		interrupt-parent = <&plic0>;
		interrupts = <19 20 21 22>;
		interrupts = <19 21 22 20>;
		reg = <0x0 0x2010000 0x0 0x1000>;
	};
	gpio: gpio@10060000 {
@@ -30,9 +30,8 @@

 #define BPF_JIT_REGION_SIZE	(SZ_128M)
 #ifdef CONFIG_64BIT
 /* KASLR should leave at least 128MB for BPF after the kernel */
 #define BPF_JIT_REGION_START	PFN_ALIGN((unsigned long)&_end)
 #define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_END	(MODULES_END)
 #else
 #define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_END	(VMALLOC_END)
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)

 void __init kasan_init(void)
 {
	phys_addr_t _start, _end;
	phys_addr_t p_start, p_end;
	u64 i;

	/*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
			    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
 .Lcleanup_sie_mcck:
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	larl	%r13,.Lsie_skip
	lghi	%r13,.Lsie_skip - .Lsie_entry
	clgr	%r9,%r13
	jh	.Lcleanup_sie_int
	jhe	.Lcleanup_sie_int
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
 .Lcleanup_sie_int:
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
	 * PKRU state is switched eagerly because it needs to be valid before we
	 * return to userland e.g. for a copy_to_user() operation.
	 */
	if (current->mm) {
	if (!(current->flags & PF_KTHREAD)) {
		/*
		 * If the PKRU bit in xsave.header.xfeatures is not set,
		 * then the PKRU component was in init state, which means
		 * XRSTOR will set PKRU to 0. If the bit is not set then
		 * get_xsave_addr() will return NULL because the PKRU value
		 * in memory is not valid. This means pkru_val has to be
		 * set to 0 and not to init_pkru_value.
		 */
		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
		if (pk)
			pkru_val = pk->pkru;
		pkru_val = pk ? pk->pkru : 0;
	}
	__write_pkru(pkru_val);
 }
@@ -212,6 +212,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file)
	list_splice_tail(&secs_pages, &zombie_secs_pages);
	mutex_unlock(&zombie_secs_pages_lock);

	xa_destroy(&vepc->page_array);
	kfree(vepc);

	return 0;
@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!access_ok(buf, size))
|
||||
return -EACCES;
|
||||
if (!access_ok(buf, size)) {
|
||||
ret = -EACCES;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!static_cpu_has(X86_FEATURE_FPU))
|
||||
return fpregs_soft_set(current, NULL,
|
||||
0, sizeof(struct user_i387_ia32_struct),
|
||||
NULL, buf) != 0;
|
||||
if (!static_cpu_has(X86_FEATURE_FPU)) {
|
||||
ret = fpregs_soft_set(current, NULL, 0,
|
||||
sizeof(struct user_i387_ia32_struct),
|
||||
NULL, buf);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (use_xsave()) {
|
||||
struct _fpx_sw_bytes fx_sw_user;
|
||||
@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
fpregs_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* The above did an FPU restore operation, restricted to
|
||||
* the user portion of the registers, and failed, but the
|
||||
* microcode might have modified the FPU registers
|
||||
* nevertheless.
|
||||
*
|
||||
* If the FPU registers do not belong to current, then
|
||||
* invalidate the FPU register state otherwise the task might
|
||||
* preempt current and return to user space with corrupted
|
||||
* FPU registers.
|
||||
*
|
||||
* In case current owns the FPU registers then no further
|
||||
* action is required. The fixup below will handle it
|
||||
* correctly.
|
||||
*/
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
__cpu_invalidate_fpregs_state();
|
||||
|
||||
fpregs_unlock();
|
||||
} else {
|
||||
/*
|
||||
@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
*/
|
||||
ret = __copy_from_user(&env, buf, sizeof(env));
|
||||
if (ret)
|
||||
goto err_out;
|
||||
goto out;
|
||||
envp = &env;
|
||||
}
|
||||
|
||||
@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
if (use_xsave() && !fx_only) {
|
||||
u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
|
||||
|
||||
if (using_compacted_format()) {
|
||||
ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
|
||||
} else {
|
||||
ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
|
||||
|
||||
if (!ret && state_size > offsetof(struct xregs_state, header))
|
||||
ret = validate_user_xstate_header(&fpu->state.xsave.header);
|
||||
}
|
||||
ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
goto out;
|
||||
|
||||
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
|
||||
fx_only);
|
||||
@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto err_out;
|
||||
goto out;
|
||||
}
|
||||
|
||||
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
|
||||
@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
} else {
|
||||
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
goto out;
|
||||
|
||||
fpregs_lock();
|
||||
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
|
||||
@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
fpregs_deactivate(fpu);
|
||||
fpregs_unlock();
|
||||
|
||||
err_out:
|
||||
out:
|
||||
if (ret)
|
||||
fpu__clear_user_states(fpu);
|
||||
return ret;
|
||||
|
@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
|
||||
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
|
||||
entry->ecx = F(RDPID);
|
||||
++array->nent;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
|
||||
if (!apic_x2apic_mode(apic))
|
||||
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
|
||||
|
||||
if (alignment + len > 4)
|
||||
return 1;
|
||||
|
||||
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
|
||||
return 1;
|
||||
|
||||
|
@ -4739,9 +4739,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
|
||||
context->inject_page_fault = kvm_inject_page_fault;
|
||||
}
|
||||
|
||||
static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
|
||||
|
||||
/*
|
||||
* Nested MMUs are used only for walking L2's gva->gpa, they never have
|
||||
* shadow pages of their own and so "direct" has no meaning. Set it
|
||||
* to "true" to try to detect bogus usage of the nested MMU.
|
||||
*/
|
||||
role.base.direct = true;
|
||||
|
||||
if (!is_paging(vcpu))
|
||||
role.base.level = 0;
|
||||
else if (is_long_mode(vcpu))
|
||||
role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
|
||||
PT64_ROOT_4LEVEL;
|
||||
else if (is_pae(vcpu))
|
||||
role.base.level = PT32E_ROOT_LEVEL;
|
||||
else
|
||||
role.base.level = PT32_ROOT_LEVEL;
|
||||
|
||||
return role;
|
||||
}
|
||||
|
||||
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
|
||||
union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
|
||||
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
|
||||
|
||||
if (new_role.as_u64 == g_context->mmu_role.as_u64)
|
||||
|
@ -221,7 +221,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
|
||||
return &avic_physical_id_table[index];
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Note:
|
||||
* AVIC hardware walks the nested page table to check permissions,
|
||||
* but does not use the SPA address specified in the leaf page
|
||||
@ -764,7 +764,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Note:
|
||||
* The HW cannot support posting multicast/broadcast
|
||||
* interrupts to a vCPU. So, we still use legacy interrupt
|
||||
@ -1005,7 +1005,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* This function is called during VCPU halt/unhalt.
|
||||
*/
|
||||
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
|
||||
|
@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
|
||||
sev->misc_cg = NULL;
|
||||
}
|
||||
|
||||
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
|
||||
static void sev_decommission(unsigned int handle)
|
||||
{
|
||||
struct sev_data_decommission decommission;
|
||||
|
||||
if (!handle)
|
||||
return;
|
||||
|
||||
decommission.handle = handle;
|
||||
sev_guest_decommission(&decommission, NULL);
|
||||
}
|
||||
|
||||
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
|
||||
{
|
||||
struct sev_data_deactivate deactivate;
|
||||
|
||||
if (!handle)
|
||||
@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
|
||||
sev_guest_deactivate(&deactivate, NULL);
|
||||
up_read(&sev_deactivate_lock);
|
||||
|
||||
/* decommission handle */
|
||||
decommission.handle = handle;
|
||||
sev_guest_decommission(&decommission, NULL);
|
||||
sev_decommission(handle);
|
||||
}
|
||||
|
||||
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
|
||||
/* Bind ASID to this guest */
|
||||
ret = sev_bind_asid(kvm, start.handle, error);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
sev_decommission(start.handle);
|
||||
goto e_free_session;
|
||||
}
|
||||
|
||||
/* return handle to userspace */
|
||||
params.handle = start.handle;
|
||||
|
@ -6247,6 +6247,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
|
||||
switch (kvm_get_apic_mode(vcpu)) {
|
||||
case LAPIC_MODE_INVALID:
|
||||
WARN_ONCE(true, "Invalid local APIC state");
|
||||
break;
|
||||
case LAPIC_MODE_DISABLED:
|
||||
break;
|
||||
case LAPIC_MODE_XAPIC:
|
||||
|
@ -7106,7 +7106,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
|
||||
|
||||
static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
|
||||
{
|
||||
emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
|
||||
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
|
||||
|
||||
vcpu->arch.hflags = emul_flags;
|
||||
kvm_mmu_reset_context(vcpu);
|
||||
}
|
||||
|
||||
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
|
||||
@ -8258,6 +8261,7 @@ void kvm_arch_exit(void)
|
||||
kvm_x86_ops.hardware_enable = NULL;
|
||||
kvm_mmu_module_exit();
|
||||
free_percpu(user_return_msrs);
|
||||
kmem_cache_destroy(x86_emulator_cache);
|
||||
kmem_cache_destroy(x86_fpu_cache);
|
||||
#ifdef CONFIG_KVM_XEN
|
||||
static_key_deferred_flush(&kvm_xen_enabled);
|
||||
|
@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
|
||||
if (!IS_ENABLED(CONFIG_EFI))
|
||||
return;
|
||||
|
||||
if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
|
||||
if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
|
||||
(efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
|
||||
efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
|
||||
desc->flags |= IORES_MAP_ENCRYPTED;
|
||||
}
|
||||
|
||||
|
@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
|
||||
|
||||
/* make sure all non-reserved blocks are inside the limits */
|
||||
bi->start = max(bi->start, low);
|
||||
bi->end = min(bi->end, high);
|
||||
|
||||
/* preserve info for non-RAM areas above 'max_pfn': */
|
||||
if (bi->end > high) {
|
||||
numa_add_memblk_to(bi->nid, high, bi->end,
|
||||
&numa_reserved_meminfo);
|
||||
bi->end = high;
|
||||
}
|
||||
|
||||
/* and there's no empty block */
|
||||
if (bi->start >= bi->end)
|
||||
|
@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
|
||||
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
|
||||
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
|
||||
|
||||
#define RS690_LOWER_TOP_OF_DRAM2 0x30
|
||||
#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
|
||||
#define RS690_UPPER_TOP_OF_DRAM2 0x31
|
||||
#define RS690_HTIU_NB_INDEX 0xA8
|
||||
#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
|
||||
#define RS690_HTIU_NB_DATA 0xAC
|
||||
|
||||
/*
|
||||
* Some BIOS implementations support RAM above 4GB, but do not configure the
|
||||
* PCI host to respond to bus master accesses for these addresses. These
|
||||
* implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
|
||||
* works as expected for addresses below 4GB.
|
||||
*
|
||||
* Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
|
||||
* https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
|
||||
*/
|
||||
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
|
||||
{
|
||||
u32 val = 0;
|
||||
phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
|
||||
|
||||
if (top_of_dram <= (1ULL << 32))
|
||||
return;
|
||||
|
||||
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
|
||||
RS690_LOWER_TOP_OF_DRAM2);
|
||||
pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
|
||||
|
||||
if (val)
|
||||
return;
|
||||
|
||||
pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
|
||||
|
||||
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
|
||||
RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
|
||||
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
|
||||
|
||||
pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
|
||||
RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
|
||||
pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
|
||||
top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
|
||||
}
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
|
||||
|
||||
#endif
|
||||
|
@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ACPI_CPPC_CPUFREQ_FIE
|
||||
bool "Frequency Invariance support for CPPC cpufreq driver"
|
||||
depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
|
||||
default y
|
||||
help
|
||||
This extends frequency invariance support in the CPPC cpufreq driver,
|
||||
by using CPPC delivered and reference performance counters.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
|
||||
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
|
||||
depends on ARCH_SUNXI
|
||||
|
@ -10,18 +10,14 @@
|
||||
|
||||
#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
|
||||
|
||||
#include <linux/arch_topology.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
|
||||
|
||||
/* Frequency invariance support */
|
||||
struct cppc_freq_invariance {
|
||||
int cpu;
|
||||
struct irq_work irq_work;
|
||||
struct kthread_work work;
|
||||
struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
|
||||
struct cppc_cpudata *cpu_data;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
|
||||
static struct kthread_worker *kworker_fie;
|
||||
static bool fie_disabled;
|
||||
|
||||
static struct cpufreq_driver cppc_cpufreq_driver;
|
||||
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
|
||||
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t0,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t1);
|
||||
|
||||
/**
|
||||
* cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
|
||||
* @work: The work item.
|
||||
*
|
||||
* The CPPC driver register itself with the topology core to provide its own
|
||||
* implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
|
||||
* gets called by the scheduler on every tick.
|
||||
*
|
||||
* Note that the arch specific counters have higher priority than CPPC counters,
|
||||
* if available, though the CPPC driver doesn't need to have any special
|
||||
* handling for that.
|
||||
*
|
||||
* On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
|
||||
* reach here from hard-irq context), which then schedules a normal work item
|
||||
* and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
|
||||
* based on the counter updates since the last tick.
|
||||
*/
|
||||
static void cppc_scale_freq_workfn(struct kthread_work *work)
|
||||
{
|
||||
struct cppc_freq_invariance *cppc_fi;
|
||||
struct cppc_perf_fb_ctrs fb_ctrs = {0};
|
||||
struct cppc_cpudata *cpu_data;
|
||||
unsigned long local_freq_scale;
|
||||
u64 perf;
|
||||
|
||||
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
|
||||
cpu_data = cppc_fi->cpu_data;
|
||||
|
||||
if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
|
||||
pr_warn("%s: failed to read perf counters\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
|
||||
perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
|
||||
fb_ctrs);
|
||||
|
||||
perf <<= SCHED_CAPACITY_SHIFT;
|
||||
local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
|
||||
if (WARN_ON(local_freq_scale > 1024))
|
||||
local_freq_scale = 1024;
|
||||
|
||||
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
|
||||
}
|
||||
|
||||
static void cppc_irq_work(struct irq_work *irq_work)
|
||||
{
|
||||
struct cppc_freq_invariance *cppc_fi;
|
||||
|
||||
cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
|
||||
kthread_queue_work(kworker_fie, &cppc_fi->work);
|
||||
}
|
||||
|
||||
static void cppc_scale_freq_tick(void)
|
||||
{
|
||||
struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
|
||||
|
||||
/*
|
||||
* cppc_get_perf_ctrs() can potentially sleep, call that from the right
|
||||
* context.
|
||||
*/
|
||||
irq_work_queue(&cppc_fi->irq_work);
|
||||
}
|
||||
|
||||
static struct scale_freq_data cppc_sftd = {
|
||||
.source = SCALE_FREQ_SOURCE_CPPC,
|
||||
.set_freq_scale = cppc_scale_freq_tick,
|
||||
};
|
||||
|
||||
static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
|
||||
struct cppc_cpudata *cpu_data)
|
||||
{
|
||||
struct cppc_perf_fb_ctrs fb_ctrs = {0};
|
||||
struct cppc_freq_invariance *cppc_fi;
|
||||
int i, ret;
|
||||
|
||||
if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
|
||||
return;
|
||||
|
||||
if (fie_disabled)
|
||||
return;
|
||||
|
||||
for_each_cpu(i, policy->cpus) {
|
||||
cppc_fi = &per_cpu(cppc_freq_inv, i);
|
||||
cppc_fi->cpu = i;
|
||||
cppc_fi->cpu_data = cpu_data;
|
||||
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
|
||||
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
|
||||
|
||||
ret = cppc_get_perf_ctrs(i, &fb_ctrs);
|
||||
if (ret) {
|
||||
pr_warn("%s: failed to read perf counters: %d\n",
|
||||
__func__, ret);
|
||||
fie_disabled = true;
|
||||
} else {
|
||||
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void __init cppc_freq_invariance_init(void)
|
||||
{
|
||||
struct sched_attr attr = {
|
||||
.size = sizeof(struct sched_attr),
|
||||
.sched_policy = SCHED_DEADLINE,
|
||||
.sched_nice = 0,
|
||||
.sched_priority = 0,
|
||||
/*
|
||||
* Fake (unused) bandwidth; workaround to "fix"
|
||||
* priority inheritance.
|
||||
*/
|
||||
.sched_runtime = 1000000,
|
||||
.sched_deadline = 10000000,
|
||||
.sched_period = 10000000,
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
|
||||
return;
|
||||
|
||||
if (fie_disabled)
|
||||
return;
|
||||
|
||||
kworker_fie = kthread_create_worker(0, "cppc_fie");
|
||||
if (IS_ERR(kworker_fie))
|
||||
return;
|
||||
|
||||
ret = sched_setattr_nocheck(kworker_fie->task, &attr);
|
||||
if (ret) {
|
||||
pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
|
||||
ret);
|
||||
kthread_destroy_worker(kworker_fie);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Register for freq-invariance */
|
||||
topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
|
||||
}
|
||||
|
||||
static void cppc_freq_invariance_exit(void)
|
||||
{
|
||||
struct cppc_freq_invariance *cppc_fi;
|
||||
int i;
|
||||
|
||||
if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
|
||||
return;
|
||||
|
||||
if (fie_disabled)
|
||||
return;
|
||||
|
||||
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
cppc_fi = &per_cpu(cppc_freq_inv, i);
|
||||
irq_work_sync(&cppc_fi->irq_work);
|
||||
}
|
||||
|
||||
kthread_destroy_worker(kworker_fie);
|
||||
kworker_fie = NULL;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline void
|
||||
cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
|
||||
struct cppc_cpudata *cpu_data)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void cppc_freq_invariance_init(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void cppc_freq_invariance_exit(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
|
||||
|
||||
/* Callback function used to retrieve the max frequency from DMI */
|
||||
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
|
||||
{
|
||||
@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
|
||||
|
||||
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
|
||||
if (ret) {
|
||||
if (ret)
|
||||
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
|
||||
caps->highest_perf, cpu, ret);
|
||||
} else {
|
||||
cppc_freq_invariance_policy_init(policy, cpu_data);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
|
||||
return (u32)t1 - (u32)t0;
|
||||
}
|
||||
|
||||
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t0,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t1)
|
||||
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t0,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t1)
|
||||
{
|
||||
u64 delta_reference, delta_delivered;
|
||||
u64 reference_perf;
|
||||
u64 reference_perf, delivered_perf;
|
||||
|
||||
reference_perf = fb_ctrs_t0.reference_perf;
|
||||
|
||||
@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
|
||||
delta_delivered = get_delta(fb_ctrs_t1.delivered,
|
||||
fb_ctrs_t0.delivered);
|
||||
|
||||
/* Check to avoid divide-by zero and invalid delivered_perf */
|
||||
if (!delta_reference || !delta_delivered)
|
||||
return cpu_data->perf_ctrls.desired_perf;
|
||||
|
||||
return (reference_perf * delta_delivered) / delta_reference;
|
||||
}
|
||||
|
||||
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t0,
|
||||
struct cppc_perf_fb_ctrs fb_ctrs_t1)
|
||||
{
|
||||
u64 delivered_perf;
|
||||
|
||||
delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
|
||||
fb_ctrs_t1);
|
||||
/* Check to avoid divide-by zero */
|
||||
if (delta_reference || delta_delivered)
|
||||
delivered_perf = (reference_perf * delta_delivered) /
|
||||
delta_reference;
|
||||
else
|
||||
delivered_perf = cpu_data->perf_ctrls.desired_perf;
|
||||
|
||||
return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
|
||||
}
|
||||
@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
|
||||
|
||||
static int __init cppc_cpufreq_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((acpi_disabled) || !acpi_cpc_valid())
|
||||
return -ENODEV;
|
||||
|
||||
@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
|
||||
|
||||
cppc_check_hisi_workaround();
|
||||
|
||||
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
|
||||
if (!ret)
|
||||
cppc_freq_invariance_init();
|
||||
|
||||
return ret;
|
||||
return cpufreq_register_driver(&cppc_cpufreq_driver);
|
||||
}
|
||||
|
||||
static inline void free_cpu_data(void)
|
||||
@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
|
||||
|
||||
static void __exit cppc_cpufreq_exit(void)
|
||||
{
|
||||
cppc_freq_invariance_exit();
|
||||
cpufreq_unregister_driver(&cppc_cpufreq_driver);
|
||||
|
||||
free_cpu_data();
|
||||
|
@ -59,6 +59,7 @@ config DMA_OF
|
||||
#devices
|
||||
config ALTERA_MSGDMA
|
||||
tristate "Altera / Intel mSGDMA Engine"
|
||||
depends on HAS_IOMEM
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Enable support for Altera / Intel mSGDMA controller.
|
||||
@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
|
||||
|
||||
config XILINX_ZYNQMP_DPDMA
|
||||
tristate "Xilinx DPDMA Engine"
|
||||
depends on HAS_IOMEM && OF
|
||||
select DMA_ENGINE
|
||||
select DMA_VIRTUAL_CHANNELS
|
||||
help
|
||||
|
@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
|
||||
}
|
||||
|
||||
if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
|
||||
err = -EINVAL;
|
||||
dev_err(dev, "DPDMAI major version mismatch\n"
|
||||
"Found %u.%u, supported version is %u.%u\n",
|
||||
priv->dpdmai_attr.version.major,
|
||||
@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
|
||||
}
|
||||
|
||||
if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
|
||||
err = -EINVAL;
|
||||
dev_err(dev, "DPDMAI minor version mismatch\n"
|
||||
"Found %u.%u, supported version is %u.%u\n",
|
||||
priv->dpdmai_attr.version.major,
|
||||
@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
|
||||
ppriv->store =
|
||||
dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
|
||||
if (!ppriv->store) {
|
||||
err = -ENOMEM;
|
||||
dev_err(dev, "dpaa2_io_store_create() failed\n");
|
||||
goto err_store;
|
||||
}
|
||||
|
@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
|
||||
pasid = iommu_sva_get_pasid(sva);
|
||||
if (pasid == IOMMU_PASID_INVALID) {
|
||||
iommu_sva_unbind_device(sva);
|
||||
rc = -EINVAL;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
|
@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
|
||||
{
|
||||
struct pci_dev *pdev = idxd->pdev;
|
||||
struct idxd_irq_entry *irq_entry;
|
||||
int i, msixcnt;
|
||||
|
||||
msixcnt = pci_msix_vec_count(pdev);
|
||||
if (msixcnt <= 0)
|
||||
return;
|
||||
|
||||
irq_entry = &idxd->irq_entries[0];
|
||||
free_irq(irq_entry->vector, irq_entry);
|
||||
|
||||
for (i = 1; i < msixcnt; i++) {
|
||||
|
||||
irq_entry = &idxd->irq_entries[i];
|
||||
if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
|
||||
idxd_device_release_int_handle(idxd, idxd->int_handles[i],
|
||||
IDXD_IRQ_MSIX);
|
||||
free_irq(irq_entry->vector, irq_entry);
|
||||
}
|
||||
|
||||
idxd_mask_error_interrupts(idxd);
|
||||
pci_free_irq_vectors(pdev);
|
||||
}
|
||||
|
||||
static int idxd_setup_wqs(struct idxd_device *idxd)
|
||||
{
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
|
||||
engine->idxd = idxd;
|
||||
device_initialize(&engine->conf_dev);
|
||||
engine->conf_dev.parent = &idxd->conf_dev;
|
||||
engine->conf_dev.bus = &dsa_bus_type;
|
||||
engine->conf_dev.type = &idxd_engine_device_type;
|
||||
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
|
||||
if (rc < 0) {
|
||||
@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void idxd_cleanup_internals(struct idxd_device *idxd)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < idxd->max_groups; i++)
|
||||
put_device(&idxd->groups[i]->conf_dev);
|
||||
for (i = 0; i < idxd->max_engines; i++)
|
||||
put_device(&idxd->engines[i]->conf_dev);
|
||||
for (i = 0; i < idxd->max_wqs; i++)
|
||||
put_device(&idxd->wqs[i]->conf_dev);
|
||||
destroy_workqueue(idxd->wq);
|
||||
}
|
||||
|
||||
static int idxd_setup_internals(struct idxd_device *idxd)
|
||||
{
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
|
||||
dev_dbg(dev, "Loading RO device config\n");
|
||||
rc = idxd_device_load_config(idxd);
|
||||
if (rc < 0)
|
||||
goto err;
|
||||
goto err_config;
|
||||
}
|
||||
|
||||
rc = idxd_setup_interrupts(idxd);
|
||||
if (rc)
|
||||
goto err;
|
||||
goto err_config;
|
||||
|
||||
dev_dbg(dev, "IDXD interrupt setup complete.\n");
|
||||
|
||||
@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
|
||||
dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
|
||||
return 0;
|
||||
|
||||
err_config:
|
||||
idxd_cleanup_internals(idxd);
|
||||
err:
|
||||
if (device_pasid_enabled(idxd))
|
||||
idxd_disable_system_pasid(idxd);
|
||||
@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void idxd_cleanup(struct idxd_device *idxd)
|
||||
{
|
||||
struct device *dev = &idxd->pdev->dev;
|
||||
|
||||
perfmon_pmu_remove(idxd);
|
||||
idxd_cleanup_interrupts(idxd);
|
||||
idxd_cleanup_internals(idxd);
|
||||
if (device_pasid_enabled(idxd))
|
||||
idxd_disable_system_pasid(idxd);
|
||||
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
|
||||
}
|
||||
|
||||
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
rc = idxd_register_devices(idxd);
|
||||
if (rc) {
|
||||
dev_err(dev, "IDXD sysfs setup failed\n");
|
||||
goto err;
|
||||
goto err_dev_register;
|
||||
}
|
||||
|
||||
idxd->state = IDXD_DEV_CONF_READY;
|
||||
@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
||||
return 0;
|
||||
|
||||
err_dev_register:
|
||||
idxd_cleanup(idxd);
|
||||
err:
|
||||
pci_iounmap(pdev, idxd->reg_base);
|
||||
err_iomap:
|
||||
@ -787,6 +843,7 @@ module_init(idxd_init_module);
|
||||
|
||||
static void __exit idxd_exit_module(void)
|
||||
{
|
||||
idxd_unregister_driver();
|
||||
pci_unregister_driver(&idxd_pci_driver);
|
||||
idxd_cdev_remove();
|
||||
idxd_unregister_bus_type();
|
||||
|
@ -230,7 +230,7 @@ out:
|
||||
}
|
||||
|
||||
/**
|
||||
* ipu_irq_map() - map an IPU interrupt source to an IRQ number
|
||||
* ipu_irq_unmap() - unmap an IPU interrupt source
|
||||
* @source: interrupt source bit position (see ipu_irq_map())
|
||||
* @return: 0 or negative error code
|
||||
*/
|
||||
|
@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
|
||||
|
||||
static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
|
||||
{
|
||||
struct dma_chan *chan = vd->tx.chan;
|
||||
struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
|
||||
|
||||
kfree(c->desc);
|
||||
kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
|
||||
}
|
||||
|
||||
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
|
||||
@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
|
||||
|
||||
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
|
||||
{
|
||||
struct mtk_uart_apdma_desc *d = c->desc;
|
||||
|
||||
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
|
||||
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
|
||||
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
|
||||
|
||||
list_del(&d->vd.node);
|
||||
vchan_cookie_complete(&d->vd);
|
||||
}
|
||||
|
||||
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
|
||||
@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
|
||||
|
||||
c->rx_status = d->avail_len - cnt;
|
||||
mtk_uart_apdma_write(c, VFF_RPT, wg);
|
||||
}
|
||||
|
||||
list_del(&d->vd.node);
|
||||
vchan_cookie_complete(&d->vd);
|
||||
static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
|
||||
{
|
||||
struct mtk_uart_apdma_desc *d = c->desc;
|
||||
|
||||
if (d) {
|
||||
list_del(&d->vd.node);
|
||||
vchan_cookie_complete(&d->vd);
|
||||
c->desc = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
|
||||
@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
|
||||
mtk_uart_apdma_rx_handler(c);
|
||||
else if (c->dir == DMA_MEM_TO_DEV)
|
||||
mtk_uart_apdma_tx_handler(c);
|
||||
mtk_uart_apdma_chan_complete_handler(c);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
|
||||
return NULL;
|
||||
|
||||
/* Now allocate and setup the descriptor */
|
||||
d = kzalloc(sizeof(*d), GFP_ATOMIC);
|
||||
d = kzalloc(sizeof(*d), GFP_NOWAIT);
|
||||
if (!d)
|
||||
return NULL;
|
||||
|
||||
@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
if (vchan_issue_pending(&c->vc)) {
|
||||
if (vchan_issue_pending(&c->vc) && !c->desc) {
|
||||
vd = vchan_next_desc(&c->vc);
|
||||
c->desc = to_mtk_uart_apdma_desc(&vd->tx);
|
||||
|
||||
|
@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
|
||||
for (i = 0; i < len / period_len; i++) {
|
||||
desc = pl330_get_desc(pch);
|
||||
if (!desc) {
|
||||
unsigned long iflags;
|
||||
|
||||
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
|
||||
__func__, __LINE__);
|
||||
|
||||
if (!first)
|
||||
return NULL;
|
||||
|
||||
spin_lock_irqsave(&pl330->pool_lock, flags);
|
||||
spin_lock_irqsave(&pl330->pool_lock, iflags);
|
||||
|
||||
while (!list_empty(&first->node)) {
|
||||
desc = list_entry(first->node.next,
|
||||
@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
|
||||
|
||||
list_move_tail(&first->node, &pl330->desc_pool);
|
||||
|
||||
spin_unlock_irqrestore(&pl330->pool_lock, flags);
|
||||
spin_unlock_irqrestore(&pl330->pool_lock, iflags);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ config QCOM_GPI_DMA
|
||||
|
||||
config QCOM_HIDMA_MGMT
|
||||
tristate "Qualcomm Technologies HIDMA Management support"
|
||||
depends on HAS_IOMEM
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Enable support for the Qualcomm Technologies HIDMA Management.
|
||||
|
@ -1,5 +1,6 @@
|
||||
config SF_PDMA
|
||||
tristate "Sifive PDMA controller driver"
|
||||
depends on HAS_IOMEM
|
||||
select DMA_ENGINE
|
||||
select DMA_VIRTUAL_CHANNELS
|
||||
help
|
||||
|
@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
|
||||
|
||||
/* Enable runtime PM and initialize the device. */
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
ret = pm_runtime_get_sync(&pdev->dev);
|
||||
ret = pm_runtime_resume_and_get(&pdev->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
|
||||
return ret;
|
||||
|
@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
|
||||
|
||||
kfree(base->lcla_pool.base_unaligned);
|
||||
|
||||
if (base->lcpa_base)
|
||||
iounmap(base->lcpa_base);
|
||||
|
||||
if (base->phy_lcpa)
|
||||
release_mem_region(base->phy_lcpa,
|
||||
base->lcpa_size);
|
||||
|
@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = pm_runtime_get_sync(dmadev->ddev.dev);
|
||||
ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
|
||||
u32 ccr, id;
|
||||
int ret;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
ret = pm_runtime_resume_and_get(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -113,6 +113,7 @@
|
||||
#define XILINX_DPDMA_CH_VDO 0x020
|
||||
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
|
||||
#define XILINX_DPDMA_CH_DESC_ID 0x028
|
||||
#define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
|
||||
|
||||
/* DPDMA descriptor fields */
|
||||
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
|
||||
@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
|
||||
* will be used, but it should be enough.
|
||||
*/
|
||||
list_for_each_entry(sw_desc, &desc->descriptors, node)
|
||||
sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
|
||||
sw_desc->hw.desc_id = desc->vdesc.tx.cookie
|
||||
& XILINX_DPDMA_CH_DESC_ID_MASK;
|
||||
|
||||
sw_desc = list_first_entry(&desc->descriptors,
|
||||
struct xilinx_dpdma_sw_desc, node);
|
||||
@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
|
||||
if (!chan->running || !pending)
|
||||
goto out;
|
||||
|
||||
desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
|
||||
desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
|
||||
& XILINX_DPDMA_CH_DESC_ID_MASK;
|
||||
|
||||
/* If the retrigger raced with vsync, retry at the next frame. */
|
||||
sw_desc = list_first_entry(&pending->descriptors,
|
||||
@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
|
||||
*/
|
||||
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
|
||||
{
|
||||
dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
|
||||
dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
|
||||
dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
|
||||
}
|
||||
|
||||
@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
|
||||
return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
|
||||
}
|
||||
|
||||
static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
|
||||
{
|
||||
unsigned int i;
|
||||
void __iomem *reg;
|
||||
|
||||
/* Disable all interrupts */
|
||||
xilinx_dpdma_disable_irq(xdev);
|
||||
|
||||
/* Stop all channels */
|
||||
for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
|
||||
reg = xdev->reg + XILINX_DPDMA_CH_BASE
|
||||
+ XILINX_DPDMA_CH_OFFSET * i;
|
||||
dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
|
||||
}
|
||||
|
||||
/* Clear the interrupt status registers */
|
||||
dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
|
||||
dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
|
||||
}
|
||||
|
||||
static int xilinx_dpdma_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct xilinx_dpdma_device *xdev;
|
||||
@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
|
||||
if (IS_ERR(xdev->reg))
|
||||
return PTR_ERR(xdev->reg);
|
||||
|
||||
dpdma_hw_init(xdev);
|
||||
|
||||
xdev->irq = platform_get_irq(pdev, 0);
|
||||
if (xdev->irq < 0) {
|
||||
dev_err(xdev->dev, "failed to get platform irq\n");
|
||||
|
@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
|
||||
struct zynqmp_dma_desc_sw *desc;
|
||||
int i, ret;
|
||||
|
||||
ret = pm_runtime_get_sync(chan->dev);
|
||||
ret = pm_runtime_resume_and_get(chan->dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -6871,8 +6871,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
|
||||
if (ring->use_doorbell) {
|
||||
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
|
||||
(adev->doorbell_index.kiq * 2) << 2);
|
||||
/* If GC has entered CGPG, ringing doorbell > first page doesn't
|
||||
* wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
|
||||
* this issue.
|
||||
*/
|
||||
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
|
||||
(adev->doorbell_index.userqueue_end * 2) << 2);
|
||||
(adev->doorbell.size - 4));
|
||||
}
|
||||
|
||||
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
|
||||
|
@ -3673,8 +3673,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
|
||||
if (ring->use_doorbell) {
|
||||
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
|
||||
(adev->doorbell_index.kiq * 2) << 2);
|
||||
/* If GC has entered CGPG, ringing doorbell > first page doesn't
|
||||
* wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
|
||||
* this issue.
|
||||
*/
|
||||
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
|
||||
(adev->doorbell_index.userqueue_end * 2) << 2);
|
||||
(adev->doorbell.size - 4));
|
||||
}
|
||||
|
||||
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
|
||||
|
@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
|
||||
nmi_exit();
|
||||
}
|
||||
|
||||
static u32 do_read_iar(struct pt_regs *regs)
|
||||
{
|
||||
u32 iar;
|
||||
|
||||
if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
|
||||
u64 pmr;
|
||||
|
||||
/*
|
||||
* We were in a context with IRQs disabled. However, the
|
||||
* entry code has set PMR to a value that allows any
|
||||
* interrupt to be acknowledged, and not just NMIs. This can
|
||||
* lead to surprising effects if the NMI has been retired in
|
||||
* the meantime, and that there is an IRQ pending. The IRQ
|
||||
* would then be taken in NMI context, something that nobody
|
||||
* wants to debug twice.
|
||||
*
|
||||
* Until we sort this, drop PMR again to a level that will
|
||||
* actually only allow NMIs before reading IAR, and then
|
||||
* restore it to what it was.
|
||||
*/
|
||||
pmr = gic_read_pmr();
|
||||
gic_pmr_mask_irqs();
|
||||
isb();
|
||||
|
||||
iar = gic_read_iar();
|
||||
|
||||
gic_write_pmr(pmr);
|
||||
} else {
|
||||
iar = gic_read_iar();
|
||||
}
|
||||
|
||||
return iar;
|
||||
}
|
||||
|
||||
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
u32 irqnr;
|
||||
|
||||
irqnr = gic_read_iar();
|
||||
irqnr = do_read_iar(regs);
|
||||
|
||||
/* Check for special IDs first */
|
||||
if ((irqnr >= 1020 && irqnr <= 1023))
|
||||
|
@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
|
||||
rtnl_lock();
|
||||
result = register_netdevice(dev);
|
||||
if (result) {
|
||||
tty_kref_put(tty);
|
||||
rtnl_unlock();
|
||||
free_netdev(dev);
|
||||
return -ENODEV;
|
||||
|
@ -82,6 +82,8 @@ struct mcba_priv {
|
||||
bool can_ka_first_pass;
|
||||
bool can_speed_check;
|
||||
atomic_t free_ctx_cnt;
|
||||
void *rxbuf[MCBA_MAX_RX_URBS];
|
||||
dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
|
||||
};
|
||||
|
||||
/* CAN frame */
|
||||
@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
|
||||
for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
|
||||
struct urb *urb = NULL;
|
||||
u8 *buf;
|
||||
dma_addr_t buf_dma;
|
||||
|
||||
/* create a URB, and a buffer for it */
|
||||
urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
|
||||
}
|
||||
|
||||
buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
|
||||
GFP_KERNEL, &urb->transfer_dma);
|
||||
GFP_KERNEL, &buf_dma);
|
||||
if (!buf) {
|
||||
netdev_err(netdev, "No memory left for USB buffer\n");
|
||||
usb_free_urb(urb);
|
||||
@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
|
||||
if (err) {
|
||||
usb_unanchor_urb(urb);
|
||||
usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
|
||||
buf, urb->transfer_dma);
|
||||
buf, buf_dma);
|
||||
usb_free_urb(urb);
|
||||
break;
|
||||
}
|
||||
|
||||
priv->rxbuf[i] = buf;
|
||||
priv->rxbuf_dma[i] = buf_dma;
|
||||
|
||||
/* Drop reference, USB core will take care of freeing it */
|
||||
usb_free_urb(urb);
|
||||
}
|
||||
@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
|
||||
|
||||
static void mcba_urb_unlink(struct mcba_priv *priv)
|
||||
{
|
||||
int i;
|
||||
|
||||
usb_kill_anchored_urbs(&priv->rx_submitted);
|
||||
|
||||
for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
|
||||
usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
|
||||
priv->rxbuf[i], priv->rxbuf_dma[i]);
|
||||
|
||||
usb_kill_anchored_urbs(&priv->tx_submitted);
|
||||
}
|
||||
|
||||
|
@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
|
||||
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
|
||||
struct ena_tx_buffer *tx_info,
|
||||
struct xdp_frame *xdpf,
|
||||
void **push_hdr,
|
||||
u32 *push_len)
|
||||
struct ena_com_tx_ctx *ena_tx_ctx)
|
||||
{
|
||||
struct ena_adapter *adapter = xdp_ring->adapter;
|
||||
struct ena_com_buf *ena_buf;
|
||||
dma_addr_t dma = 0;
|
||||
int push_len = 0;
|
||||
dma_addr_t dma;
|
||||
void *data;
|
||||
u32 size;
|
||||
|
||||
tx_info->xdpf = xdpf;
|
||||
data = tx_info->xdpf->data;
|
||||
size = tx_info->xdpf->len;
|
||||
ena_buf = tx_info->bufs;
|
||||
|
||||
/* llq push buffer */
|
||||
*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
|
||||
*push_hdr = tx_info->xdpf->data;
|
||||
if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
|
||||
/* Designate part of the packet for LLQ */
|
||||
push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
|
||||
|
||||
if (size - *push_len > 0) {
|
||||
ena_tx_ctx->push_header = data;
|
||||
|
||||
size -= push_len;
|
||||
data += push_len;
|
||||
}
|
||||
|
||||
ena_tx_ctx->header_len = push_len;
|
||||
|
||||
if (size > 0) {
|
||||
dma = dma_map_single(xdp_ring->dev,
|
||||
*push_hdr + *push_len,
|
||||
size - *push_len,
|
||||
data,
|
||||
size,
|
||||
DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
|
||||
goto error_report_dma_error;
|
||||
|
||||
tx_info->map_linear_data = 1;
|
||||
tx_info->num_of_bufs = 1;
|
||||
}
|
||||
tx_info->map_linear_data = 0;
|
||||
|
||||
ena_buf->paddr = dma;
|
||||
ena_buf->len = size;
|
||||
ena_buf = tx_info->bufs;
|
||||
ena_buf->paddr = dma;
|
||||
ena_buf->len = size;
|
||||
|
||||
ena_tx_ctx->ena_bufs = ena_buf;
|
||||
ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@ -274,10 +286,6 @@ error_report_dma_error:
|
||||
&xdp_ring->syncp);
|
||||
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
|
||||
|
||||
xdp_return_frame_rx_napi(tx_info->xdpf);
|
||||
tx_info->xdpf = NULL;
|
||||
tx_info->num_of_bufs = 0;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
|
||||
struct ena_com_tx_ctx ena_tx_ctx = {};
|
||||
struct ena_tx_buffer *tx_info;
|
||||
u16 next_to_use, req_id;
|
||||
void *push_hdr;
|
||||
u32 push_len;
|
||||
int rc;
|
||||
|
||||
next_to_use = xdp_ring->next_to_use;
|
||||
@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
|
||||
tx_info = &xdp_ring->tx_buffer_info[req_id];
|
||||
tx_info->num_of_bufs = 0;
|
||||
|
||||
rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
|
||||
rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
|
||||
if (unlikely(rc))
|
||||
return rc;
|
||||
|
||||
ena_tx_ctx.ena_bufs = tx_info->bufs;
|
||||
ena_tx_ctx.push_header = push_hdr;
|
||||
ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
|
||||
ena_tx_ctx.req_id = req_id;
|
||||
ena_tx_ctx.header_len = push_len;
|
||||
|
||||
rc = ena_xmit_common(dev,
|
||||
xdp_ring,
|
||||
|
@ -1849,6 +1849,7 @@ out_free_netdev:
|
||||
free_netdev(netdev);
|
||||
out_pci_release:
|
||||
pci_release_mem_regions(pdev);
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
out_pci_disable:
|
||||
pci_disable_device(pdev);
|
||||
return err;
|
||||
|
@@ -7308,7 +7308,7 @@ skip_rdma:
entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -11750,6 +11750,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
bnxt_hwrm_coal_params_qcaps(bp);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);

static int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -11764,6 +11766,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
netdev_err(bp->dev, "Firmware init phase 2 failed\n");
return rc;
}
rc = bnxt_probe_phy(bp, false);
if (rc)
return rc;
rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
if (rc)
return rc;
@@ -13155,6 +13160,7 @@ init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);

@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
spin_lock_bh(&adap->win0_lock);
|
||||
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
|
||||
spin_unlock_bh(&adap->win0_lock);
|
||||
if (ret)
|
||||
dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
|
||||
/* We have to RESET the chip/firmware because we need the
|
||||
* chip in uninitialized state for loading new PHY image.
|
||||
* Otherwise, the running firmware will only store the PHY
|
||||
* image in local RAM which will be lost after next reset.
|
||||
*/
|
||||
ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
|
||||
if (ret < 0) {
|
||||
dev_err(adap->pdev_dev,
|
||||
"Set FW to RESET for flashing PHY FW failed. ret: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
|
||||
if (ret < 0) {
|
||||
dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
|
||||
@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
|
||||
u32 ftid)
|
||||
{
|
||||
struct tid_info *t = &adap->tids;
|
||||
struct filter_entry *f;
|
||||
|
||||
if (ftid < t->nhpftids)
|
||||
f = &adap->tids.hpftid_tab[ftid];
|
||||
else if (ftid < t->nftids)
|
||||
f = &adap->tids.ftid_tab[ftid - t->nhpftids];
|
||||
else
|
||||
f = lookup_tid(&adap->tids, ftid);
|
||||
if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
|
||||
return &t->hpftid_tab[ftid - t->hpftid_base];
|
||||
|
||||
return f;
|
||||
if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
|
||||
return &t->ftid_tab[ftid - t->ftid_base];
|
||||
|
||||
return lookup_tid(t, ftid);
|
||||
}
|
||||
|
||||
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
|
||||
@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
|
||||
filter_id = filter_info->loc_array[cmd->fs.location];
|
||||
f = cxgb4_get_filter_entry(adapter, filter_id);
|
||||
|
||||
if (f->fs.prio)
|
||||
filter_id -= adapter->tids.hpftid_base;
|
||||
else if (!f->fs.hash)
|
||||
filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
|
||||
|
||||
ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
|
||||
|
||||
filter_info = &adapter->ethtool_filters->port[pi->port_id];
|
||||
|
||||
if (fs.prio)
|
||||
tid += adapter->tids.hpftid_base;
|
||||
else if (!fs.hash)
|
||||
tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
|
||||
|
||||
filter_info->loc_array[cmd->fs.location] = tid;
|
||||
set_bit(cmd->fs.location, filter_info->bmap);
|
||||
filter_info->in_use++;
|
||||
|
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
WORD_MASK, f->fs.nat_lip[3] |
f->fs.nat_lip[2] << 8 |
f->fs.nat_lip[1] << 16 |
(u64)f->fs.nat_lip[0] << 25, 1);
(u64)f->fs.nat_lip[0] << 24, 1);
}
}

@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
|
||||
|
||||
/* Load PHY Firmware onto adapter.
|
||||
*/
|
||||
spin_lock_bh(&adap->win0_lock);
|
||||
ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
|
||||
(u8 *)phyf->data, phyf->size);
|
||||
spin_unlock_bh(&adap->win0_lock);
|
||||
if (ret < 0)
|
||||
dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
|
||||
-ret);
|
||||
|
@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
|
||||
* @addr: the start address to write
|
||||
* @n: length of data to write in bytes
|
||||
* @data: the data to write
|
||||
* @byte_oriented: whether to store data as bytes or as words
|
||||
*
|
||||
* Writes up to a page of data (256 bytes) to the serial flash starting
|
||||
* at the given address. All the data must be written to the same page.
|
||||
* If @byte_oriented is set the write data is stored as byte stream
|
||||
* (i.e. matches what on disk), otherwise in big-endian.
|
||||
*/
|
||||
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
|
||||
unsigned int n, const u8 *data)
|
||||
unsigned int n, const u8 *data, bool byte_oriented)
|
||||
{
|
||||
int ret;
|
||||
u32 buf[64];
|
||||
unsigned int i, c, left, val, offset = addr & 0xff;
|
||||
u32 buf[64];
|
||||
int ret;
|
||||
|
||||
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
|
||||
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
|
||||
goto unlock;
|
||||
|
||||
for (left = n; left; left -= c) {
|
||||
for (left = n; left; left -= c, data += c) {
|
||||
c = min(left, 4U);
|
||||
for (val = 0, i = 0; i < c; ++i)
|
||||
val = (val << 8) + *data++;
|
||||
for (val = 0, i = 0; i < c; ++i) {
|
||||
if (byte_oriented)
|
||||
val = (val << 8) + data[i];
|
||||
else
|
||||
val = (val << 8) + data[c - i - 1];
|
||||
}
|
||||
|
||||
ret = sf1_write(adapter, c, c != left, 1, val);
|
||||
if (ret)
|
||||
@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
|
||||
t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
|
||||
|
||||
/* Read the page to verify the write succeeded */
|
||||
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
|
||||
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
|
||||
byte_oriented);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
|
||||
*/
|
||||
memcpy(first_page, fw_data, SF_PAGE_SIZE);
|
||||
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
|
||||
ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
|
||||
ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
|
||||
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
|
||||
addr += SF_PAGE_SIZE;
|
||||
fw_data += SF_PAGE_SIZE;
|
||||
ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
|
||||
ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = t4_write_flash(adap,
|
||||
fw_start + offsetof(struct fw_hdr, fw_ver),
|
||||
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
|
||||
ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
|
||||
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
|
||||
true);
|
||||
out:
|
||||
if (ret)
|
||||
dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
|
||||
@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
|
||||
/* Copy the supplied PHY Firmware image to the adapter memory location
|
||||
* allocated by the adapter firmware.
|
||||
*/
|
||||
spin_lock_bh(&adap->win0_lock);
|
||||
ret = t4_memory_rw(adap, win, mtype, maddr,
|
||||
phy_fw_size, (__be32 *)phy_fw_data,
|
||||
T4_MEMORY_WRITE);
|
||||
spin_unlock_bh(&adap->win0_lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
|
||||
n = size - i;
|
||||
else
|
||||
n = SF_PAGE_SIZE;
|
||||
ret = t4_write_flash(adap, addr, n, cfg_data);
|
||||
ret = t4_write_flash(adap, addr, n, cfg_data, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
|
||||
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
|
||||
addr += SF_PAGE_SIZE;
|
||||
boot_data += SF_PAGE_SIZE;
|
||||
ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
|
||||
ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
|
||||
false);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
|
||||
(const u8 *)header);
|
||||
(const u8 *)header, false);
|
||||
|
||||
out:
|
||||
if (ret)
|
||||
@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
|
||||
for (i = 0; i < size; i += SF_PAGE_SIZE) {
|
||||
n = min_t(u32, size - i, SF_PAGE_SIZE);
|
||||
|
||||
ret = t4_write_flash(adap, addr, n, cfg_data);
|
||||
ret = t4_write_flash(adap, addr, n, cfg_data, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
|
||||
for (i = 0; i < npad; i++) {
|
||||
u8 data = 0;
|
||||
|
||||
ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
|
||||
ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
|
||||
false);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
struct ec_bhf_priv *priv = netdev_priv(net_dev);

unregister_netdev(net_dev);
free_netdev(net_dev);

pci_iounmap(dev, priv->dma_io);
pci_iounmap(dev, priv->io);

free_netdev(net_dev);

pci_release_regions(dev);
pci_clear_master(dev);
pci_disable_device(dev);

@ -5897,6 +5897,7 @@ drv_cleanup:
|
||||
unmap_bars:
|
||||
be_unmap_pci_bars(adapter);
|
||||
free_netdev:
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
free_netdev(netdev);
|
||||
rel_reg:
|
||||
pci_release_regions(pdev);
|
||||
|
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
u32 tempval;

tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);

if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);

return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_caps.enable = fec_ptp_enable;

fep->cycle_speed = clk_get_rate(fep->clk_ptp);
if (!fep->cycle_speed) {
fep->cycle_speed = NSEC_PER_SEC;
dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
}
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

spin_lock_init(&fep->tmreg_lock);

@ -1717,12 +1717,13 @@ setup_rings:
|
||||
* ice_vsi_cfg_txqs - Configure the VSI for Tx
|
||||
* @vsi: the VSI being configured
|
||||
* @rings: Tx ring array to be configured
|
||||
* @count: number of Tx ring array elements
|
||||
*
|
||||
* Return 0 on success and a negative value on error
|
||||
* Configure the Tx VSI for operation.
|
||||
*/
|
||||
static int
|
||||
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
|
||||
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
|
||||
{
|
||||
struct ice_aqc_add_tx_qgrp *qg_buf;
|
||||
u16 q_idx = 0;
|
||||
@ -1734,7 +1735,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
|
||||
|
||||
qg_buf->num_txqs = 1;
|
||||
|
||||
for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
|
||||
for (q_idx = 0; q_idx < count; q_idx++) {
|
||||
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
|
||||
if (err)
|
||||
goto err_cfg_txqs;
|
||||
@ -1754,7 +1755,7 @@ err_cfg_txqs:
|
||||
*/
|
||||
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
|
||||
{
|
||||
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
|
||||
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1769,7 +1770,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
|
||||
ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2009,17 +2010,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
|
||||
* @rst_src: reset source
|
||||
* @rel_vmvf_num: Relative ID of VF/VM
|
||||
* @rings: Tx ring array to be stopped
|
||||
* @count: number of Tx ring array elements
|
||||
*/
|
||||
static int
|
||||
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
|
||||
u16 rel_vmvf_num, struct ice_ring **rings)
|
||||
u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
|
||||
{
|
||||
u16 q_idx;
|
||||
|
||||
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
|
||||
return -EINVAL;
|
||||
|
||||
for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
|
||||
for (q_idx = 0; q_idx < count; q_idx++) {
|
||||
struct ice_txq_meta txq_meta = { };
|
||||
int status;
|
||||
|
||||
@ -2047,7 +2049,7 @@ int
|
||||
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
|
||||
u16 rel_vmvf_num)
|
||||
{
|
||||
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
|
||||
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2056,7 +2058,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
|
||||
*/
|
||||
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
|
||||
{
|
||||
return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
|
||||
return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2555,6 +2555,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
|
||||
return (ret || xdp_ring_err) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp_safe_mode - XDP handler for safe mode
|
||||
* @dev: netdevice
|
||||
* @xdp: XDP command
|
||||
*/
|
||||
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
|
||||
struct netdev_bpf *xdp)
|
||||
{
|
||||
NL_SET_ERR_MSG_MOD(xdp->extack,
|
||||
"Please provide working DDP firmware package in order to use XDP\n"
|
||||
"Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp - implements XDP handler
|
||||
* @dev: netdevice
|
||||
@ -6937,6 +6951,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
|
||||
.ndo_change_mtu = ice_change_mtu,
|
||||
.ndo_get_stats64 = ice_get_stats64,
|
||||
.ndo_tx_timeout = ice_tx_timeout,
|
||||
.ndo_bpf = ice_xdp_safe_mode,
|
||||
};
|
||||
|
||||
static const struct net_device_ops ice_netdev_ops = {
|
||||
|
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;

@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
goto skip;
}
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;

if (ret) {
ch->skb[ch->dma.desc] = skb;
net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
struct xrx200_chan *ch = ptr;

if (napi_schedule_prep(&ch->napi)) {
__napi_schedule(&ch->napi);
ltq_dma_disable_irq(&ch->dma);
__napi_schedule(&ch->napi);
}

ltq_dma_ack_irq(&ch->dma);

@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
|
||||
int ret = 0, i;
|
||||
|
||||
mutex_lock(&mlx5_intf_mutex);
|
||||
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
|
||||
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
|
||||
if (!priv->adev[i]) {
|
||||
bool is_supported = false;
|
||||
@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
|
||||
}
|
||||
} else {
|
||||
adev = &priv->adev[i]->adev;
|
||||
|
||||
/* Pay attention that this is not PCI driver that
|
||||
* mlx5_core_dev is connected, but auxiliary driver.
|
||||
*
|
||||
* Here we can race of module unload with devlink
|
||||
* reload, but we don't need to take extra lock because
|
||||
* we are holding global mlx5_intf_mutex.
|
||||
*/
|
||||
if (!adev->dev.driver)
|
||||
continue;
|
||||
adrv = to_auxiliary_drv(adev->dev.driver);
|
||||
|
||||
if (adrv->resume)
|
||||
@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
|
||||
continue;
|
||||
|
||||
adev = &priv->adev[i]->adev;
|
||||
/* Auxiliary driver was unbind manually through sysfs */
|
||||
if (!adev->dev.driver)
|
||||
goto skip_suspend;
|
||||
|
||||
adrv = to_auxiliary_drv(adev->dev.driver);
|
||||
|
||||
if (adrv->suspend) {
|
||||
@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
|
||||
continue;
|
||||
}
|
||||
|
||||
skip_suspend:
|
||||
del_adev(&priv->adev[i]->adev);
|
||||
priv->adev[i] = NULL;
|
||||
}
|
||||
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
|
||||
mutex_unlock(&mlx5_intf_mutex);
|
||||
}
|
||||
|
||||
@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
|
||||
struct mlx5_priv *priv = &dev->priv;
|
||||
|
||||
lockdep_assert_held(&mlx5_intf_mutex);
|
||||
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
|
||||
return 0;
|
||||
|
||||
delete_drivers(dev);
|
||||
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
|
||||
|
@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
struct devlink_port *port;
|
||||
|
||||
if (!netif_device_present(dev))
|
||||
return NULL;
|
||||
port = mlx5e_devlink_get_dl_port(priv);
|
||||
if (port->registered)
|
||||
return port;
|
||||
|
@ -1,7 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
||||
// Copyright (c) 2020 Mellanox Technologies
|
||||
|
||||
#include <linux/ptp_classify.h>
|
||||
#include "en/ptp.h"
|
||||
#include "en/txrx.h"
|
||||
#include "en/params.h"
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include "en.h"
|
||||
#include "en_stats.h"
|
||||
#include <linux/ptp_classify.h>
|
||||
|
||||
struct mlx5e_ptpsq {
|
||||
struct mlx5e_txqsq txqsq;
|
||||
@ -43,6 +44,27 @@ struct mlx5e_ptp {
|
||||
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
|
||||
};
|
||||
|
||||
static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
|
||||
{
|
||||
struct flow_keys fk;
|
||||
|
||||
if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
|
||||
return false;
|
||||
|
||||
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
|
||||
return false;
|
||||
|
||||
if (fk.basic.n_proto == htons(ETH_P_1588))
|
||||
return true;
|
||||
|
||||
if (fk.basic.n_proto != htons(ETH_P_IP) &&
|
||||
fk.basic.n_proto != htons(ETH_P_IPV6))
|
||||
return false;
|
||||
|
||||
return (fk.basic.ip_proto == IPPROTO_UDP &&
|
||||
fk.ports.dst == htons(PTP_EV_PORT));
|
||||
}
|
||||
|
||||
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
|
||||
u8 lag_port, struct mlx5e_ptp **cp);
|
||||
void mlx5e_ptp_close(struct mlx5e_ptp *c);
|
||||
|
@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
|
||||
work);
|
||||
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
|
||||
struct neighbour *n = update_work->n;
|
||||
struct mlx5e_encap_entry *e = NULL;
|
||||
bool neigh_connected, same_dev;
|
||||
struct mlx5e_encap_entry *e;
|
||||
unsigned char ha[ETH_ALEN];
|
||||
struct mlx5e_priv *priv;
|
||||
u8 nud_state, dead;
|
||||
|
||||
rtnl_lock();
|
||||
@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
|
||||
if (!same_dev)
|
||||
goto out;
|
||||
|
||||
list_for_each_entry(e, &nhe->encap_list, encap_list) {
|
||||
if (!mlx5e_encap_take(e))
|
||||
continue;
|
||||
/* mlx5e_get_next_init_encap() releases previous encap before returning
|
||||
* the next one.
|
||||
*/
|
||||
while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
|
||||
mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
|
||||
|
||||
priv = netdev_priv(e->out_dev);
|
||||
mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
|
||||
mlx5e_encap_put(priv, e);
|
||||
}
|
||||
out:
|
||||
rtnl_unlock();
|
||||
mlx5e_release_neigh_update_work(update_work);
|
||||
|
@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
/* wait for encap to be fully initialized */
|
||||
wait_for_completion(&e->res_ready);
|
||||
|
||||
mutex_lock(&esw->offloads.encap_tbl_lock);
|
||||
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
|
||||
if (e->compl_result < 0 || (encap_connected == neigh_connected &&
|
||||
ether_addr_equal(e->h_dest, ha)))
|
||||
if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
|
||||
goto unlock;
|
||||
|
||||
mlx5e_take_all_encap_flows(e, &flow_list);
|
||||
|
@ -251,9 +251,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
|
||||
mlx5e_take_tmp_flow(flow, flow_list, 0);
|
||||
}
|
||||
|
||||
typedef bool (match_cb)(struct mlx5e_encap_entry *);
|
||||
|
||||
static struct mlx5e_encap_entry *
|
||||
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
|
||||
struct mlx5e_encap_entry *e)
|
||||
mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
|
||||
struct mlx5e_encap_entry *e,
|
||||
match_cb match)
|
||||
{
|
||||
struct mlx5e_encap_entry *next = NULL;
|
||||
|
||||
@ -288,7 +291,7 @@ retry:
|
||||
/* wait for encap to be fully initialized */
|
||||
wait_for_completion(&next->res_ready);
|
||||
/* continue searching if encap entry is not in valid state after completion */
|
||||
if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
|
||||
if (!match(next)) {
|
||||
e = next;
|
||||
goto retry;
|
||||
}
|
||||
@ -296,6 +299,30 @@ retry:
|
||||
return next;
|
||||
}
|
||||
|
||||
static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
|
||||
{
|
||||
return e->flags & MLX5_ENCAP_ENTRY_VALID;
|
||||
}
|
||||
|
||||
static struct mlx5e_encap_entry *
|
||||
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
|
||||
struct mlx5e_encap_entry *e)
|
||||
{
|
||||
return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
|
||||
}
|
||||
|
||||
static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
|
||||
{
|
||||
return e->compl_result >= 0;
|
||||
}
|
||||
|
||||
struct mlx5e_encap_entry *
|
||||
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
|
||||
struct mlx5e_encap_entry *e)
|
||||
{
|
||||
return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
|
||||
}
|
||||
|
||||
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
|
||||
{
|
||||
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
|
||||
|
@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct net_device *netdev = priv->netdev;
|
||||
|
||||
if (!priv->ipsec)
|
||||
return;
|
||||
|
||||
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
|
||||
!MLX5_CAP_ETH(mdev, swp)) {
|
||||
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
|
||||
|
@ -356,7 +356,7 @@ err:
|
||||
|
||||
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
|
||||
{
|
||||
int err = 0;
|
||||
int err = -ENOMEM;
|
||||
int i;
|
||||
|
||||
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
|
||||
|
@ -2705,8 +2705,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
|
||||
nch = priv->channels.params.num_channels;
|
||||
ntc = priv->channels.params.num_tc;
|
||||
num_rxqs = nch * priv->profile->rq_groups;
|
||||
if (priv->channels.params.ptp_rx)
|
||||
num_rxqs++;
|
||||
|
||||
mlx5e_netdev_set_tcs(netdev, nch, ntc);
|
||||
|
||||
@ -4824,22 +4822,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
|
||||
}
|
||||
|
||||
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
|
||||
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
|
||||
NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
||||
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
|
||||
NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
||||
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
||||
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
|
||||
NETIF_F_GSO_UDP_TUNNEL_CSUM;
|
||||
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
|
||||
}
|
||||
|
||||
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
|
||||
netdev->hw_features |= NETIF_F_GSO_GRE |
|
||||
NETIF_F_GSO_GRE_CSUM;
|
||||
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
|
||||
NETIF_F_GSO_GRE_CSUM;
|
||||
netdev->gso_partial_features |= NETIF_F_GSO_GRE |
|
||||
NETIF_F_GSO_GRE_CSUM;
|
||||
netdev->hw_features |= NETIF_F_GSO_GRE;
|
||||
netdev->hw_enc_features |= NETIF_F_GSO_GRE;
|
||||
netdev->gso_partial_features |= NETIF_F_GSO_GRE;
|
||||
}
|
||||
|
||||
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
|
||||
|
@ -4765,7 +4765,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
|
||||
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
|
||||
wait_for_completion(&hpe->res_ready);
|
||||
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
|
||||
hpe->hp->pair->peer_gone = true;
|
||||
mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
|
||||
|
||||
mlx5e_hairpin_put(priv, hpe);
|
||||
}
|
||||
|
@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
|
||||
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
|
||||
|
||||
struct mlx5e_neigh_hash_entry;
|
||||
struct mlx5e_encap_entry *
|
||||
mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
|
||||
struct mlx5e_encap_entry *e);
|
||||
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
|
||||
|
||||
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
|
||||
|
@ -32,7 +32,6 @@
|
||||
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/ptp_classify.h>
|
||||
#include <net/geneve.h>
|
||||
#include <net/dsfield.h>
|
||||
#include "en.h"
|
||||
@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool mlx5e_use_ptpsq(struct sk_buff *skb)
|
||||
{
|
||||
struct flow_keys fk;
|
||||
|
||||
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
|
||||
return false;
|
||||
|
||||
if (fk.basic.n_proto == htons(ETH_P_1588))
|
||||
return true;
|
||||
|
||||
if (fk.basic.n_proto != htons(ETH_P_IP) &&
|
||||
fk.basic.n_proto != htons(ETH_P_IPV6))
|
||||
return false;
|
||||
|
||||
return (fk.basic.ip_proto == IPPROTO_UDP &&
|
||||
fk.ports.dst == htons(PTP_EV_PORT));
|
||||
}
|
||||
|
||||
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
ptp_channel = READ_ONCE(priv->channels.ptp);
|
||||
if (unlikely(ptp_channel) &&
|
||||
test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
|
||||
mlx5e_use_ptpsq(skb))
|
||||
if (unlikely(ptp_channel &&
|
||||
test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
|
||||
mlx5e_use_ptpsq(skb)))
|
||||
return mlx5e_select_ptpsq(dev, skb);
|
||||
|
||||
txq_ix = netdev_pick_tx(dev, skb, NULL);
|
||||
|
@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
|
||||
|
||||
eqe = next_eqe_sw(eq);
|
||||
if (!eqe)
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
do {
|
||||
struct mlx5_core_cq *cq;
|
||||
@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
|
||||
++eq->cons_index;
|
||||
|
||||
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
|
||||
|
||||
out:
|
||||
eq_update_ci(eq, 1);
|
||||
|
||||
if (cqn != -1)
|
||||
@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
|
||||
++eq->cons_index;
|
||||
|
||||
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
|
||||
eq_update_ci(eq, 1);
|
||||
|
||||
out:
|
||||
eq_update_ci(eq, 1);
|
||||
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
|
||||
|
||||
return unlikely(recovery) ? num_eqes : 0;
|
||||
|
@ -1054,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
|
||||
goto err_vhca_mapping;
|
||||
}
|
||||
|
||||
/* External controller host PF has factory programmed MAC.
|
||||
* Read it from the device.
|
||||
*/
|
||||
if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
|
||||
mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
|
||||
|
||||
esw_vport_change_handle_locked(vport);
|
||||
|
||||
esw->enabled_vports++;
|
||||
|
@ -1161,7 +1161,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
|
||||
err = mlx5_core_set_hca_defaults(dev);
|
||||
if (err) {
|
||||
mlx5_core_err(dev, "Failed to set hca defaults\n");
|
||||
goto err_sriov;
|
||||
goto err_set_hca;
|
||||
}
|
||||
|
||||
mlx5_vhca_event_start(dev);
|
||||
@ -1194,6 +1194,7 @@ err_ec:
|
||||
mlx5_sf_hw_table_destroy(dev);
|
||||
err_vhca:
|
||||
mlx5_vhca_event_stop(dev);
|
||||
err_set_hca:
|
||||
mlx5_cleanup_fs(dev);
|
||||
err_fs:
|
||||
mlx5_accel_tls_cleanup(dev);
|
||||
|
@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
|
||||
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
|
||||
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
|
||||
mkey->size = MLX5_GET64(mkc, mkc, len);
|
||||
mkey->key |= mlx5_idx_to_mkey(mkey_index);
|
||||
mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
|
||||
mkey->pd = MLX5_GET(mkc, mkc, pd);
|
||||
init_waitqueue_head(&mkey->wait);
|
||||
|
||||
|
@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!MLX5_CAP_GEN(dev, roce))
|
||||
return;
|
||||
|
||||
err = mlx5_nic_vport_enable_roce(dev);
|
||||
if (err) {
|
||||
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
|
||||
|
@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
|
||||
sf_index = event->function_id - base_id;
|
||||
sf_dev = xa_load(&table->devices, sf_index);
|
||||
switch (event->new_vhca_state) {
|
||||
case MLX5_VHCA_STATE_INVALID:
|
||||
case MLX5_VHCA_STATE_ALLOCATED:
|
||||
if (sf_dev)
|
||||
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
|
||||
|
@ -694,7 +694,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
|
||||
if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(padded_data, data, data_sz);
|
||||
inline_data_sz =
|
||||
MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
|
||||
|
||||
/* Add an alignment padding */
|
||||
memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
|
||||
|
||||
/* Remove L2L3 outer headers */
|
||||
MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
|
||||
@ -706,32 +710,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
|
||||
hw_action += DR_STE_ACTION_DOUBLE_SZ;
|
||||
used_actions++; /* Remove and NOP are a single double action */
|
||||
|
||||
inline_data_sz =
|
||||
MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
|
||||
/* Point to the last dword of the header */
|
||||
data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
|
||||
|
||||
/* Add the new header inline + 2 extra bytes */
|
||||
/* Add the new header using inline action 4Byte at a time, the header
|
||||
* is added in reversed order to the beginning of the packet to avoid
|
||||
* incorrect parsing by the HW. Since header is 14B or 18B an extra
|
||||
* two bytes are padded and later removed.
|
||||
*/
|
||||
for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
|
||||
void *addr_inline;
|
||||
|
||||
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
|
||||
DR_STE_V1_ACTION_ID_INSERT_INLINE);
|
||||
/* The hardware expects here offset to words (2 bytes) */
|
||||
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
|
||||
i * 2);
|
||||
MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
|
||||
|
||||
/* Copy bytes one by one to avoid endianness problem */
|
||||
addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
|
||||
hw_action, inline_data);
|
||||
memcpy(addr_inline, data_ptr, inline_data_sz);
|
||||
memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
|
||||
hw_action += DR_STE_ACTION_DOUBLE_SZ;
|
||||
data_ptr += inline_data_sz;
|
||||
used_actions++;
|
||||
}
|
||||
|
||||
/* Remove 2 extra bytes */
|
||||
/* Remove first 2 extra bytes */
|
||||
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
|
||||
DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
|
||||
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
|
||||
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
|
||||
/* The hardware expects here size in words (2 bytes) */
|
||||
MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
|
||||
used_actions++;
|
||||
|
@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
MLX5_STEERING_FORMAT_CONNECTX_6DX));
return MLX5_CAP_GEN(dev, roce) &&
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
MLX5_STEERING_FORMAT_CONNECTX_6DX)));
}

/* buddy functions & structure */

@ -424,6 +424,15 @@ err_modify_sq:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hp->num_channels; i++)
|
||||
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
|
||||
MLX5_SQC_STATE_RST, 0, 0);
|
||||
}
|
||||
|
||||
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
|
||||
{
|
||||
int i;
|
||||
@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
|
||||
for (i = 0; i < hp->num_channels; i++)
|
||||
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
|
||||
MLX5_RQC_STATE_RST, 0, 0);
|
||||
|
||||
/* unset peer SQs */
|
||||
if (hp->peer_gone)
|
||||
return;
|
||||
for (i = 0; i < hp->num_channels; i++)
|
||||
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
|
||||
MLX5_SQC_STATE_RST, 0, 0);
|
||||
if (!hp->peer_gone)
|
||||
mlx5_hairpin_unpair_peer_sq(hp);
|
||||
}
|
||||
|
||||
struct mlx5_hairpin *
|
||||
@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
|
||||
mlx5_hairpin_destroy_queues(hp);
|
||||
kfree(hp);
|
||||
}
|
||||
|
||||
void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
|
||||
{
|
||||
int i;
|
||||
|
||||
mlx5_hairpin_unpair_peer_sq(hp);
|
||||
|
||||
/* destroy peer SQ */
|
||||
for (i = 0; i < hp->num_channels; i++)
|
||||
mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
|
||||
|
||||
hp->peer_gone = true;
|
||||
}
|
||||
|
@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
|
||||
void *in;
|
||||
int err;
|
||||
|
||||
if (!vport)
|
||||
return -EINVAL;
|
||||
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
|
||||
return -EACCES;
|
||||
|
||||
|
@ -693,7 +693,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
|
||||
MLXSW_THERMAL_TRIP_MASK,
|
||||
module_tz,
|
||||
&mlxsw_thermal_module_ops,
|
||||
NULL, 0, 0);
|
||||
NULL, 0,
|
||||
module_tz->parent->polling_delay);
|
||||
if (IS_ERR(module_tz->tzdev)) {
|
||||
err = PTR_ERR(module_tz->tzdev);
|
||||
return err;
|
||||
@ -815,7 +816,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
|
||||
MLXSW_THERMAL_TRIP_MASK,
|
||||
gearbox_tz,
|
||||
&mlxsw_thermal_gearbox_ops,
|
||||
NULL, 0, 0);
|
||||
NULL, 0,
|
||||
gearbox_tz->parent->polling_delay);
|
||||
if (IS_ERR(gearbox_tz->tzdev))
|
||||
return PTR_ERR(gearbox_tz->tzdev);
|
||||
|
||||
|
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11

static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,

@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
u8 band, u32 child_handle)
|
||||
{
|
||||
struct mlxsw_sp_qdisc *old_qdisc;
|
||||
u32 parent;
|
||||
|
||||
if (band < mlxsw_sp_qdisc->num_classes &&
|
||||
mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
|
||||
@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
if (old_qdisc)
|
||||
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
|
||||
|
||||
mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
|
||||
parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
|
||||
mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
|
||||
parent);
|
||||
if (!WARN_ON(!mlxsw_sp_qdisc))
|
||||
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
|
||||
|
||||
|
@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
|
||||
|
||||
int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
{
|
||||
unsigned int pause_ena;
|
||||
int err, val;
|
||||
|
||||
/* Disable dequeuing from the egress queues */
|
||||
@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
QSYS_PORT_MODE, port);
|
||||
|
||||
/* Disable flow control */
|
||||
ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
|
||||
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
|
||||
|
||||
/* Disable priority flow control */
|
||||
@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
|
||||
/* Clear flushing again. */
|
||||
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
|
||||
|
||||
/* Re-enable flow control */
|
||||
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(ocelot_port_flush);
|
||||
|
@ -1602,6 +1602,8 @@ err_out_free_netdev:
|
||||
free_netdev(netdev);
|
||||
|
||||
err_out_free_res:
|
||||
if (NX_IS_REVISION_P3(pdev->revision))
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
pci_release_regions(pdev);
|
||||
|
||||
err_out_disable_pdev:
|
||||
|
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;

p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
sizeof(struct qed_dcbx_admin_params));
sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;

memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));

@ -2690,6 +2690,7 @@ err_out_free_hw_res:
|
||||
kfree(ahw);
|
||||
|
||||
err_out_free_res:
|
||||
pci_disable_pcie_error_reporting(pdev);
|
||||
pci_release_regions(pdev);
|
||||
|
||||
err_out_disable_pdev:
|
||||
|
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_vnd_stats total_stats;
struct rmnet_vnd_stats total_stats = { };
struct rmnet_pcpu_stats *pcpu_ptr;
struct rmnet_vnd_stats snapshot;
unsigned int cpu, start;

memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
snapshot = pcpu_ptr->stats; /* struct assignment */
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
total_stats.rx_pkts += snapshot.rx_pkts;
total_stats.rx_bytes += snapshot.rx_bytes;
total_stats.tx_pkts += snapshot.tx_pkts;
total_stats.tx_bytes += snapshot.tx_bytes;
total_stats.tx_drops += snapshot.tx_drops;
}

s->rx_packets = total_stats.rx_pkts;
@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
}

return 0;
}
}

@@ -1671,7 +1671,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}

@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *sh_eth_gstrings_stats,
memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}

@@ -76,10 +76,10 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */

/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
(reg * 8))
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
0x00000044 + (reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1

#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */

@@ -622,6 +622,8 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
clk_disable_unprepare(plat->stmmac_clk);
clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}

@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
|
||||
stat = be32_to_cpu(cur_p->app0);
|
||||
|
||||
while (stat & STS_CTRL_APP0_CMPLT) {
|
||||
/* Make sure that the other fields are read after bd is
|
||||
* released by dma
|
||||
*/
|
||||
rmb();
|
||||
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
|
||||
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
|
||||
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
|
||||
if (skb)
|
||||
dev_consume_skb_irq(skb);
|
||||
cur_p->app0 = 0;
|
||||
cur_p->app1 = 0;
|
||||
cur_p->app2 = 0;
|
||||
cur_p->app3 = 0;
|
||||
@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
|
||||
ndev->stats.tx_packets++;
|
||||
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
|
||||
|
||||
/* app0 must be visible last, as it is used to flag
|
||||
* availability of the bd
|
||||
*/
|
||||
smp_mb();
|
||||
cur_p->app0 = 0;
|
||||
|
||||
lp->tx_bd_ci++;
|
||||
if (lp->tx_bd_ci >= lp->tx_bd_num)
|
||||
lp->tx_bd_ci = 0;
|
||||
@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
|
||||
if (cur_p->app0)
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
/* Make sure to read next bd app0 after this one */
|
||||
rmb();
|
||||
|
||||
tail++;
|
||||
if (tail >= lp->tx_bd_num)
|
||||
tail = 0;
|
||||
@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
smp_mb();
|
||||
|
||||
/* Space might have just been freed - check again */
|
||||
if (temac_check_tx_bd_space(lp, num_frag))
|
||||
if (temac_check_tx_bd_space(lp, num_frag + 1))
|
||||
return NETDEV_TX_BUSY;
|
||||
|
||||
netif_wake_queue(ndev);
|
||||
@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
cur_p->phys = cpu_to_be32(skb_dma_addr);
|
||||
ptr_to_txbd((void *)skb, cur_p);
|
||||
|
||||
for (ii = 0; ii < num_frag; ii++) {
|
||||
if (++lp->tx_bd_tail >= lp->tx_bd_num)
|
||||
@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
}
|
||||
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
|
||||
|
||||
/* Mark last fragment with skb address, so it can be consumed
|
||||
* in temac_start_xmit_done()
|
||||
*/
|
||||
ptr_to_txbd((void *)skb, cur_p);
|
||||
|
||||
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
|
||||
lp->tx_bd_tail++;
|
||||
if (lp->tx_bd_tail >= lp->tx_bd_num)
|
||||
@ -926,6 +942,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
wmb();
|
||||
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
|
||||
|
||||
if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
|
||||
netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
|
||||
netif_stop_queue(ndev);
|
||||
}
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|