
Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date: 2018-08-02 09:59:20 +02:00
commit 16e0e6a83b
231 changed files with 2299 additions and 1103 deletions


@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
Phy documentation is provided in the following places:
Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
Example device nodes:


@ -7095,6 +7095,7 @@ F: include/uapi/linux/input.h
F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
INPUT MULTITOUCH (MT) PROTOCOL


@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Merciless Moray
# *DOCUMENTATION*


@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
return 0;
}
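Several hunks in this commit make the same substitution, replacing open-coded on-stack/static VMA setup with vma_init(). For context, a sketch of the helper as introduced earlier in this series (assumed to match the 4.18-era definition, kernel context, not verbatim): it zeroes the structure and installs a dummy vm_ops so the VMA is not mistaken for anonymous, since vma_is_anonymous() tests vm_ops == NULL; the mmap_zero() hunk further below uses vma_set_anonymous() to opt back in.

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	/* zero every field, then set the owning mm */
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	/* non-NULL vm_ops: the VMA is not anonymous until explicitly opted in */
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}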


@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
vma_init(&vma, mm);
vma.vm_flags = VM_EXEC;
vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);


@ -37,7 +37,9 @@ static inline void __tlb_remove_table(void *_table)
static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
struct vm_area_struct vma;
vma_init(&vma, tlb->mm);
/*
* The ASID allocator will either invalidate the ASID or mark


@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void update_cpu_capabilities(u16 scope_mask)
{
__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}
static int __enable_cpu_capability(void *arg)
@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void __init enable_cpu_capabilities(u16 scope_mask)
{
__enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
__enable_cpu_capabilities(arm64_features, scope_mask);
}
/*


@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
struct vm_area_struct vma = { .vm_mm = mm };
struct vm_area_struct vma;
pte_t orig_pte = huge_ptep_get(ptep);
bool valid = pte_valid(orig_pte);
unsigned long i, saddr = addr;
vma_init(&vma, mm);
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
struct vm_area_struct vma = { .vm_mm = mm };
struct vm_area_struct vma;
unsigned long i, saddr = addr;
vma_init(&vma, mm);
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
pte_clear(mm, addr, ptep);


@ -611,11 +611,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
* correctly.
* correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;


@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
*/
struct vm_area_struct vma;
vma.vm_mm = tlb->mm;
vma_init(&vma, tlb->mm);
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */


@ -273,7 +273,7 @@ static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
gate_vma.vm_mm = NULL;
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;


@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
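The fix scales the register index to a byte offset: the flush registers are 32-bit words, so index N lives at base + N * 4, not base + N. A standalone sketch of the arithmetic (hypothetical values):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int reg = 2;			/* hypothetical register index */
	uintptr_t old_off = reg;		/* pre-fix offset: wrong */
	uintptr_t new_off = reg * sizeof(uint32_t);

	/* only the scaled offset actually reaches register 2 */
	assert(new_off == 8 && old_off == 2);
	return 0;
}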


@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
/*
* BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
* Enable ExternalSync for sync instruction to take effect
*/
set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}


@ -681,8 +681,6 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
/* ExternalSync */
#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)


@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
*end = rsrc->start + size - 1;
}
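The resource end is inclusive, so the last valid address is start + size - 1; the old code pointed one byte past the BAR. A worked example with hypothetical values:

#include <assert.h>

int main(void)
{
	unsigned long start = 0x1000, size = 0x100;	/* hypothetical BAR */

	/* the resource spans 0x1000..0x10ff, so the inclusive end is: */
	assert(start + size - 1 == 0x10ff);
	/* the pre-fix value, start + size == 0x1100, is one past the BAR */
	return 0;
}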


@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
u32 tmp_idx;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@ -637,11 +638,7 @@ emit_clear:
case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not word-aligned */
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */
@ -649,32 +646,16 @@ emit_clear:
/* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
/* otherwise, let's try once more */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* exit if the store was not successful */
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not doubleword-aligned */
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
tmp_idx = ctx->idx * 4;
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
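Both XADD cases now branch back to tmp_idx (the address of the reserving load) when the conditional store fails, instead of emitting a duplicated second attempt. The emitted pattern is the classic load-reserve/store-conditional retry loop; a C sketch that models the reservation with a compare-and-swap builtin (an analogue, not the emitted code):

#include <stdint.h>

/* Analogue of the emitted loop: retry from the reserving load until the
 * conditional store succeeds (lwarx ~ load + reserve, stwcx. ~ store iff
 * the reservation still holds).
 */
static void atomic_add32(volatile uint32_t *ea, uint32_t src)
{
	uint32_t old;

	do {
		old = *ea;
	} while (!__sync_bool_compare_and_swap(ea, old, old + src));
}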
/*


@ -140,7 +140,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GCC_PLUGINS if BROKEN
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4


@ -106,9 +106,13 @@ define cmd_check_data_rel
done
endef
# We need to run two commands under "if_changed", so merge them into a
# single invocation.
quiet_cmd_check-and-link-vmlinux = LD $@
cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,check_data_rel)
$(call if_changed,ld)
$(call if_changed,check-and-link-vmlinux)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE


@ -981,7 +981,7 @@ ENTRY(\sym)
call \do_sym
jmp error_exit /* %ebx: no swapgs flag */
jmp error_exit
.endif
END(\sym)
.endm
@ -1222,7 +1222,6 @@ END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch GS if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
UNWIND_HINT_FUNC
@ -1269,7 +1268,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret
@ -1303,28 +1301,20 @@ ENTRY(error_entry)
/*
* Pretend that the exception came from user mode: set up pt_regs
* as if we faulted immediately after IRET and clear EBX so that
* error_exit knows that we will be returning to user mode.
* as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)
/*
* On entry, EBX is a "return to kernel mode" flag:
* 1: already in kernel mode, don't need SWAPGS
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
ENTRY(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl %ebx, %ebx
jnz retint_kernel
testb $3, CS(%rsp)
jz retint_kernel
jmp retint_user
END(error_exit)
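The removed EBX bookkeeping is replaced by inspecting the saved CS on the stack: the low two bits of the selector (the RPL) are 3 for user mode and 0 for kernel mode, so the return path needs no scratch-register flag. As a C sketch over the saved frame:

#include <stdbool.h>
#include <stdint.h>

/* testb $3, CS(%rsp) in C: the RPL bits of the saved selector are
 * nonzero iff the exception came from user mode.
 */
static bool came_from_user(uint64_t saved_cs)
{
	return (saved_cs & 3) != 0;
}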


@ -28,7 +28,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 3
#define UNCORE_EXTRA_PCI_DEV_MAX 4
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
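driver_data packs a device type and an index into one word; the two macros above unpack it. A round-trip sketch, with the pack macro written out here under the assumption that it is the exact inverse of the extractors:

#include <assert.h>

#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
/* assumed inverse of the two extractors above */
#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))

int main(void)
{
	unsigned long data = UNCORE_PCI_DEV_DATA(0xff, 3);

	assert(UNCORE_PCI_DEV_TYPE(data) == 0xff);
	assert(UNCORE_PCI_DEV_IDX(data) == 3);
	return 0;
}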


@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
BDX_PCI_QPI_PORT2_FILTER,
HSWEP_PCI_PCU_3,
};
@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
},
{ /* QPI Port 0 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT0_FILTER),
},
{ /* QPI Port 1 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* QPI Port 2 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
BDX_PCI_QPI_PORT2_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),


@ -43,7 +43,7 @@ asm (".pushsection .text;"
"push %rdx;"
"mov $0x1,%eax;"
"xor %edx,%edx;"
"lock cmpxchg %dl,(%rdi);"
LOCK_PREFIX "cmpxchg %dl,(%rdi);"
"cmp $0x1,%al;"
"jne .slowpath;"
"pop %rdx;"


@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
case 0x04: return 0x02000014;
}
if (boot_cpu_data.x86_stepping > 4)
return 0;
return ~0U;
}


@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL);
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;


@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;
if (sev_active())
if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
flags |= _PAGE_ENC;
pfn = md->phys_addr >> PAGE_SHIFT;


@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
if (!FIXADDR_USER_START)
return 0;
gate_vma.vm_mm = NULL;
vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;


@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_add_page);
/**
* bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be mapped
*
* Pins as many pages from *iter and appends them to @bio's bvec array. The
* Pins pages from *iter and appends them to @bio's bvec array. The
* pages will have to be released using put_page() when done.
* For multi-segment *iter, this function only adds pages from the
* next non-empty segment of the iov iterator.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
size_t offset, diff;
size_t offset;
ssize_t size;
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
/*
* Deep magic below: We need to walk the pinned pages backwards
@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size += size;
bio->bi_vcnt += nr_pages;
diff = (nr_pages * PAGE_SIZE - offset) - size;
while (nr_pages--) {
bv[nr_pages].bv_page = pages[nr_pages];
bv[nr_pages].bv_len = PAGE_SIZE;
bv[nr_pages].bv_offset = 0;
while (idx--) {
bv[idx].bv_page = pages[idx];
bv[idx].bv_len = PAGE_SIZE;
bv[idx].bv_offset = 0;
}
bv[0].bv_offset += offset;
bv[0].bv_len -= offset;
if (diff)
bv[bio->bi_vcnt - 1].bv_len -= diff;
bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
iov_iter_advance(iter, size);
return 0;
}
/**
* bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be mapped
*
* Pins pages from *iter and appends them to @bio's bvec array. The
* pages will have to be released using put_page() when done.
* The function tries, but does not guarantee, to pin as many pages as
* fit into the bio, or are requested in *iter, whatever is smaller.
* If MM encounters an error pinning the requested pages, it stops.
* Error is returned only if 0 pages could be pinned.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short orig_vcnt = bio->bi_vcnt;
do {
int ret = __bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret))
return bio->bi_vcnt > orig_vcnt ? 0 : ret;
} while (iov_iter_count(iter) && !bio_full(bio));
return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
static void submit_bio_wait_endio(struct bio *bio)
@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);


@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;
if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
MQ_RQ_IN_FLIGHT)
if (!blk_mq_mark_complete(rq))
return;
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
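The open-coded cmpxchg moves behind blk_mq_mark_complete(), a helper exported by the same series so that completion ownership can be claimed in one atomic step; presumably equivalent to (kernel context):

static inline bool blk_mq_mark_complete(struct request *rq)
{
	/* winner of the IN_FLIGHT -> COMPLETE transition completes the rq */
	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
			MQ_RQ_IN_FLIGHT;
}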


@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
/*
* ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
* executing it as a control method. However, if we encounter
* an error while loading the table, we need to keep trying to
* load the table rather than aborting the table load. Set the
* status to AE_OK to proceed with the table load.
*/
if ((walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)
&& status == AE_ALREADY_EXISTS) {
status = AE_OK;
}
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}
@ -694,6 +706,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
} else
if ((walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)
&& ACPI_FAILURE(status)) {
/*
* ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
* executing it as a control method. However, if we encounter
* an error while loading the table, we need to keep trying to
* load the table rather than aborting the table load. Set the
* status to AE_OK to proceed with the table load. If we get a
* failure at this point, it means that the dispatcher got an
* error while processing Op (most likely an AML operand error).
*/
status = AE_OK;
}
}


@ -434,14 +434,6 @@ re_probe:
goto probe_failed;
}
/*
* Ensure devices are listed in devices_kset in correct order
* It's important to move Dev to the end of devices_kset before
* calling .probe, because it could be recursive and parent Dev
* should always go first
*/
devices_kset_move_last(dev);
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)


@ -112,12 +112,16 @@ struct nbd_device {
struct task_struct *task_setup;
};
#define NBD_CMD_REQUEUED 1
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
int index;
int cookie;
struct completion send_complete;
blk_status_t status;
unsigned long flags;
u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}
static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
blk_mq_requeue_request(req, true);
}
#define NBD_COOKIE_BITS 32
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
u32 tag = blk_mq_unique_tag(req);
u64 cookie = cmd->cmd_cookie;
return (cookie << NBD_COOKIE_BITS) | tag;
}
static u32 nbd_handle_to_tag(u64 handle)
{
return (u32)handle;
}
static u32 nbd_handle_to_cookie(u64 handle)
{
return (u32)(handle >> NBD_COOKIE_BITS);
}
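The 64-bit wire handle now carries a per-command cookie in the high half and the blk-mq tag in the low half, which is what lets nbd_read_stat() detect stale or duplicate replies. The packing round-trips; a standalone sketch with hypothetical values:

#include <assert.h>
#include <stdint.h>

#define NBD_COOKIE_BITS 32

int main(void)
{
	uint32_t cookie = 7, tag = 0x00010003;	/* hypothetical values */
	uint64_t handle = ((uint64_t)cookie << NBD_COOKIE_BITS) | tag;

	/* unpack exactly as nbd_handle_to_tag()/nbd_handle_to_cookie() do */
	assert((uint32_t)handle == tag);
	assert((uint32_t)(handle >> NBD_COOKIE_BITS) == cookie);
	return 0;
}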
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
blk_mq_requeue_request(req, true);
mutex_unlock(&cmd->lock);
nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:
@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
goto send_pages;
}
iov_iter_advance(&from, sent);
} else {
cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;
@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
memcpy(request.handle, &tag, sizeof(tag));
handle = nbd_cmd_handle(cmd);
memcpy(request.handle, &handle, sizeof(handle));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
@ -541,6 +583,7 @@ send_pages:
*/
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
int ret = 0;
reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EPROTO);
}
memcpy(&tag, reply.handle, sizeof(u32));
memcpy(&handle, reply.handle, sizeof(handle));
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
ret = -ENOENT;
goto out;
}
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
req);
ret = -ENOENT;
goto out;
}
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = BLK_STS_IOERR;
return cmd;
goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = BLK_STS_IOERR;
return cmd;
goto out;
}
return ERR_PTR(-EIO);
ret = -EIO;
goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
} else {
/* See the comment in nbd_queue_rq. */
wait_for_completion(&cmd->send_complete);
}
return cmd;
out:
mutex_unlock(&cmd->lock);
return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
@ -805,7 +864,7 @@ again:
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
blk_mq_requeue_request(req, true);
nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}
@ -818,7 +877,7 @@ again:
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
blk_mq_requeue_request(req, true);
nbd_requeue_cmd(cmd);
ret = 0;
}
out:
@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* that the server is misbehaving (or there was an error) before we're
* done sending everything over the wire.
*/
init_completion(&cmd->send_complete);
mutex_lock(&cmd->lock);
clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
complete(&cmd->send_complete);
mutex_unlock(&cmd->lock);
return ret;
}
@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
cmd->flags = 0;
mutex_init(&cmd->lock);
return 0;
}


@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
vma_set_anonymous(vma);
return 0;
}


@ -1895,14 +1895,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
__u32 buf[16];
__u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
int b, i = 0;
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
if (!arch_get_random_int(&t))
break;
buf[i] ^= t;
}
count -= bytes;
p += bytes;


@ -24,7 +24,7 @@
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
#define AST2400_HPLL_STRAPPED BIT(18)
#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;
/*
* If the IP is in reset, treat the clock as not enabled;
* this happens with some clocks such as the USB one when
* coming from cold reset. Without this, aspeed_clk_enable()
* will fail to lift the reset.
*/
if (gate->reset_idx >= 0) {
regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
if (reg & rst)
return 0;
}
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
return ((reg & clk) == enval) ? 1 : 0;
@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, freq, div;
u32 val, div, clkin, hpll;
const u16 hpll_rates[][4] = {
{384, 360, 336, 408},
{400, 375, 350, 425},
};
int rate;
/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
if (val & CLKIN_25MHZ_EN)
freq = 25000000;
else if (val & AST2400_CLK_SOURCE_SEL)
freq = 48000000;
else
freq = 24000000;
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
pr_debug("clkin @%u MHz\n", freq / 1000000);
rate = (val >> 8) & 3;
if (val & CLKIN_25MHZ_EN) {
clkin = 25000000;
hpll = hpll_rates[1][rate];
} else if (val & AST2400_CLK_SOURCE_SEL) {
clkin = 48000000;
hpll = hpll_rates[0][rate];
} else {
clkin = 24000000;
hpll = hpll_rates[0][rate];
}
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
pr_debug("clkin @%u MHz\n", clkin / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled
* and we assume that it is enabled. It can be configured through the
* HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
if (val & AST2400_HPLL_PROGRAMMED)
hw = aspeed_ast2400_calc_pll("hpll", val);
else
hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
hpll * 1000000);
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)


@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include <linux/stringify.h>
#include "clk.h"
@ -2559,7 +2558,7 @@ static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, __stringify(f) }
#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
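The switch from __stringify(f) to plain #f is deliberate: # stringizes the argument exactly as written, while the extra indirection inside __stringify() first expands the flag macro to its numeric definition, printing the value instead of the name. A compilable illustration (STR_* are illustrative names):

#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define CLK_SET_RATE_GATE	BIT(0)

#define STR_DIRECT(f)	#f		/* stringize as written */
#define STR_1(f)	#f
#define STR_EXPAND(f)	STR_1(f)	/* argument expands first, as __stringify does */

int main(void)
{
	puts(STR_DIRECT(CLK_SET_RATE_GATE));	/* prints "CLK_SET_RATE_GATE" */
	puts(STR_EXPAND(CLK_SET_RATE_GATE));	/* prints "(1UL << (0))" */
	return 0;
}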


@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
unsigned long divider;
divider = meson_parm_read(clk->map, &adiv->div);
divider = meson_parm_read(clk->map, &adiv->div) + 1;
return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
}
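The hardware field stores divider-minus-one, so the fix adds 1 before dividing; a field value of 0 means divide-by-1. A standalone model of the corrected math (hypothetical helper name):

#include <stdint.h>

/* field value 3 with a 48 MHz parent -> divider 4 -> 12 MHz */
static uint64_t audio_div_rate(uint64_t parent_rate, uint32_t field)
{
	uint32_t divider = field + 1;

	return (parent_rate + divider - 1) / divider;	/* round up, as DIV_ROUND_UP_ULL */
}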


@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
},
};


@ -35,6 +35,7 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14
#define ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR 4
#define ARMADA_37XX_NB_L0L1 0x18
@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
/*
* Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
* respectively) to the L0 frequency (1.2 GHz) requires a significant
* amount of time to let VDD stabilize to the appropriate
* voltage. This amount of time is large enough that it cannot be
* covered by the hardware countdown register. Due to this, the CPU
* might start operating at L0 before the voltage is stabilized,
* leading to CPU stalls.
*
* To work around this problem, we prevent switching directly from the
* L2/L3 frequencies to the L0 frequency, and instead switch to the L1
* frequency in-between. The sequence therefore becomes:
* 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
* 2. Sleep 20 ms to let the VDD voltage stabilize
* 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
*/
static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
{
unsigned int cur_level;
if (rate != 1200 * 1000 * 1000)
return;
regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
return;
regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
ARMADA_37XX_NB_CPU_LOAD_MASK,
ARMADA_37XX_DVFS_LOAD_1);
msleep(20);
}
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
clk_pm_cpu_set_rate_wa(rate, base);
regmap_update_bits(base, reg, mask, load_level);
return rate;


@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75018,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),


@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
.name = "mmagic_bimc",
},
.pwrsts = PWRSTS_OFF_ON,
.flags = ALWAYS_ON,
};
static struct gdsc mmagic_video_gdsc = {


@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
{}
};
/*


@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
fwspec.param[1] = IRQ_TYPE_NONE;
/*
* IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
* temporarily. Anyway, ->irq_set_type() will override it later.
*/
fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
return irq_create_fwspec_mapping(&fwspec);
}


@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Note that active low is the default.
*/
if (IS_ENABLED(CONFIG_REGULATOR) &&
(of_device_is_compatible(np, "reg-fixed-voltage") ||
(of_device_is_compatible(np, "regulator-fixed") ||
of_device_is_compatible(np, "reg-fixed-voltage") ||
of_device_is_compatible(np, "regulator-gpio"))) {
/*
* The regulator GPIO handles are specified such that the


@ -652,6 +652,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
struct intel_fbdev;
struct intel_fbc_work;


@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
/* Quirk time at 100ms for reliable operation */
msleep(100);
}
}
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,


@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}
/*
* GeminiLake NUC HDMI outputs require additional off time;
* this allows the onboard retimer to correctly sync to the signal
*/
static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
DRM_INFO("Applying Increase DDI Disabled quirk\n");
}
struct intel_quirk {
int device;
int subsystem_vendor;
@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
/* GeminiLake NUC */
{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
static void intel_init_quirks(struct drm_device *dev)


@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
struct intel_encoder *


@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
/* disable LDB by resetting the control register to POR default */
regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
imx_ldb->dev = dev;
if (of_id)
@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
if (!of_device_is_available(child))
continue;
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
if (!of_device_is_available(child))
continue;
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;


@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;


@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
*
* Note:
* CLKH is not allowed to be 0; in this case the I2C clock is not generated
* at all
*/
if (clk >= clkl + d) {
if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
clkh = 0;
clkh = 1;
clkl = clk - (d << 1);
}


@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {


@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
/* register offsets */
@ -111,8 +112,9 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED (1 << 31)
#define ID_P_MASK ID_P_PM_BLOCKED
#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@ -141,6 +143,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;
struct reset_control *rstc;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
/* Gen3 can only do one RXDMA per transfer and we just completed it */
if (priv->devtype == I2C_RCAR_GEN3 &&
priv->dma_direction == DMA_FROM_DEVICE)
priv->flags |= ID_P_NO_RXDMA;
priv->dma_direction = DMA_NONE;
}
@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;
/* Do not use DMA if it's not available or for messages < 8 bytes */
if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
/* Do various checks to see if DMA is feasible at all */
if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
(read && priv->flags & ID_P_NO_RXDMA))
return;
if (read) {
@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}
/* I2C is a special case, we need to poll the status of a reset */
static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
{
int i, ret;
ret = reset_control_reset(priv->rstc);
if (ret)
return ret;
for (i = 0; i < LOOP_TIMEOUT; i++) {
ret = reset_control_status(priv->rstc);
if (ret == 0)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
/* Gen3 needs a reset before allowing RXDMA once */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->flags |= ID_P_NO_RXDMA;
if (!IS_ERR(priv->rstc)) {
ret = rcar_i2c_do_reset(priv);
if (ret == 0)
priv->flags &= ~ID_P_NO_RXDMA;
}
}
rcar_i2c_init(priv);
ret = rcar_i2c_bus_barrier(priv);
@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
ret = reset_control_status(priv->rstc);
if (ret < 0)
priv->rstc = ERR_PTR(-ENOTSUPP);
}
}
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;


@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
rt_mutex_lock(&adapter->bus_lock);
rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
}
/**


@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
rt_mutex_lock(&parent->mux_lock);
rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return;
i2c_lock_bus(parent, flags);
@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;
rt_mutex_lock(&parent->mux_lock);
rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
i2c_lock_bus(parent, flags);
}


@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
{ "ELAN061D", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};


@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
{
/* Lenovo LaVie Z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
{ }
};


@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
bond->params.miimon);
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
newval->string);
/* disable arp monitoring */
bond->params.arp_interval = 0;
}
if (!bond->params.miimon) {
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
bond->params.miimon);
}
}
if (newval->value == BOND_MODE_ALB)


@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
int err;
err = pm_runtime_get_sync(priv->device);
if (err)
if (err < 0) {
pm_runtime_put_noidle(priv->device);
return err;
}
return err;
return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(cclk);
priv->mram_base = mram_addr;
m_can_of_parse_mram(priv, mram_config_vals);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto clk_disable;
}
m_can_of_parse_mram(priv, mram_config_vals);
devm_can_led_init(dev);
of_can_transceiver(dev);
@ -1687,8 +1690,6 @@ failed_ret:
return ret;
}
/* TODO: runtime PM with power down or sleep mode */
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
m_can_init_ram(priv);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
if (ret)
return ret;
m_can_init_ram(priv);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);


@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
if (!cdm) {
of_node_put(np_cdm);
dev_err(&ofdev->dev, "can't map clock node!\n");
return 0;
}
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;


@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
((u32)(y) << 16) | \
((u32)(z) << 8))
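Packing major/minor/sub into one integer, high byte first, makes the firmware version check below a single unsigned compare. A standalone sketch (FW_VERSION is an illustrative copy of the macro above):

#include <assert.h>
#include <stdint.h>

#define FW_VERSION(x, y, z) (((uint32_t)(x) << 24) | \
			     ((uint32_t)(y) << 16) | \
			     ((uint32_t)(z) << 8))

int main(void)
{
	/* major and minor dominate, so 3.2.9 sorts below 3.3.0 */
	assert(FW_VERSION(3, 2, 9) < FW_VERSION(3, 3, 0));
	assert(FW_VERSION(3, 3, 0) < FW_VERSION(4, 0, 0));
	return 0;
}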
/* System Control Registers Bits */
#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
"%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
hw_ver_major, hw_ver_minor, hw_ver_sub);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* FW < v3.3.0 DMA logic doesn't correctly handle the mix of 32-bit and
* 64-bit logical addresses: this workaround forces usage of 32-bit
* DMA addresses only when such a fw is detected.
*/
if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
PCIEFD_FW_VERSION(3, 3, 0)) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
dev_warn(&pdev->dev,
"warning: can't set DMA mask %llxh (err %d)\n",
DMA_BIT_MASK(32), err);
}
#endif
/* stop system clock */
pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
PCIEFD_REG_SYS_CTL_CLR);


@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@ -101,7 +104,7 @@ enum xcan_reg {
#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@ -118,6 +121,7 @@ enum xcan_reg {
/**
* struct xcan_priv - This definition defines the CAN driver instance
* @can: CAN private data structure.
* @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sent on the queue
* @tx_max: Maximum number of packets the driver can send
@ -132,6 +136,7 @@ enum xcan_reg {
*/
struct xcan_priv {
struct can_priv can;
spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
#define XCAN_CAP_WATERMARK 0x0001
struct xcan_devtype_data {
unsigned int caps;
};
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
/* reset clears FIFOs */
priv->tx_head = 0;
priv->tx_tail = 0;
return 0;
}
@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
unsigned long flags;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
/* Write the Frame to Xilinx CAN TX FIFO */
@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stats->tx_bytes += cf->can_dlc;
}
/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
if (priv->tx_max > 1)
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
}
@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
return 1;
}
/**
* xcan_current_error_state - Get current error state from HW
* @ndev: Pointer to net_device structure
*
* Checks the current CAN error state from the HW. Note that this
* only checks for ERROR_PASSIVE and ERROR_WARNING.
*
* Return:
* ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
* otherwise.
*/
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
return CAN_STATE_ERROR_PASSIVE;
else if (status & XCAN_SR_ERRWRN_MASK)
return CAN_STATE_ERROR_WARNING;
else
return CAN_STATE_ERROR_ACTIVE;
}
/**
* xcan_set_error_state - Set new CAN error state
* @ndev: Pointer to net_device structure
* @new_state: The new CAN state to be set
* @cf: Error frame to be populated or NULL
*
* Set new CAN error state for the device, updating statistics and
* populating the error frame if given.
*/
static void xcan_set_error_state(struct net_device *ndev,
enum can_state new_state,
struct can_frame *cf)
{
struct xcan_priv *priv = netdev_priv(ndev);
u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
u32 txerr = ecr & XCAN_ECR_TEC_MASK;
u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
priv->can.state = new_state;
if (cf) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
switch (new_state) {
case CAN_STATE_ERROR_PASSIVE:
priv->can.can_stats.error_passive++;
if (cf)
cf->data[1] = (rxerr > 127) ?
CAN_ERR_CRTL_RX_PASSIVE :
CAN_ERR_CRTL_TX_PASSIVE;
break;
case CAN_STATE_ERROR_WARNING:
priv->can.can_stats.error_warning++;
if (cf)
cf->data[1] |= (txerr > rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
break;
case CAN_STATE_ERROR_ACTIVE:
if (cf)
cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
break;
default:
/* non-ERROR states are handled elsewhere */
WARN_ON(1);
break;
}
}
/**
* xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
* @ndev: Pointer to net_device structure
*
* If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
* the performed RX/TX has caused it to drop to a lesser state and set
* the interface state accordingly.
*/
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
enum can_state old_state = priv->can.state;
enum can_state new_state;
/* changing error state due to successful frame RX/TX can only
* occur from these states
*/
if (old_state != CAN_STATE_ERROR_WARNING &&
old_state != CAN_STATE_ERROR_PASSIVE)
return;
new_state = xcan_current_error_state(ndev);
if (new_state != old_state) {
struct sk_buff *skb;
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
if (skb) {
struct net_device_stats *stats = &ndev->stats;
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
}
}
/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u32 err_status, status, txerr = 0, rxerr = 0;
u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (rxerr > 127) ?
CAN_ERR_CRTL_RX_PASSIVE :
CAN_ERR_CRTL_TX_PASSIVE;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
} else if (status & XCAN_SR_ERRWRN_MASK) {
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= (txerr > rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
} else {
enum can_state new_state = xcan_current_error_state(ndev);
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
if (isr & XCAN_IXR_RXOK_MASK) {
priv->write_reg(priv, XCAN_ICR_OFFSET,
XCAN_IXR_RXOK_MASK);
work_done += xcan_rx(ndev);
} else {
priv->write_reg(priv, XCAN_ICR_OFFSET,
XCAN_IXR_RXNEMP_MASK);
break;
}
work_done += xcan_rx(ndev);
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
if (work_done)
if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
xcan_update_error_state_after_rxtx(ndev);
}
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
ier |= XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned int frames_in_fifo;
int frames_sent = 1; /* TXOK => at least 1 frame was sent */
unsigned long flags;
int retries = 0;
while ((priv->tx_head - priv->tx_tail > 0) &&
(isr & XCAN_IXR_TXOK_MASK)) {
/* Synchronize with xmit as we need to know the exact number
* of frames in the FIFO to stay in sync due to the TXFEMP
* handling.
* This also prevents a race between netif_wake_queue() and
* netif_stop_queue().
*/
spin_lock_irqsave(&priv->tx_lock, flags);
frames_in_fifo = priv->tx_head - priv->tx_tail;
if (WARN_ON_ONCE(frames_in_fifo == 0)) {
/* clear TXOK anyway to avoid getting back here */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return;
}
/* Check if 2 frames were sent (TXOK only means that at least 1
* frame was sent).
*/
if (frames_in_fifo > 1) {
WARN_ON(frames_in_fifo > priv->tx_max);
/* Synchronize TXOK and isr so that after the loop:
* (1) isr variable is up-to-date at least up to TXOK clear
* time. This avoids us clearing a TXOK of a second frame
* but not noticing that the FIFO is now empty and thus
* marking only a single frame as sent.
* (2) No TXOK is left. Having one could mean leaving a
* stray TXOK as we might process the associated frame
* via TXFEMP handling as we read TXFEMP *after* TXOK
* clear to satisfy (1).
*/
while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
if (isr & XCAN_IXR_TXFEMP_MASK) {
/* nothing in FIFO anymore */
frames_sent = frames_in_fifo;
}
} else {
/* single frame in fifo, just clear TXOK */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
}
while (frames_sent--) {
can_get_echo_skb(ndev, priv->tx_tail %
priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
can_led_event(ndev, CAN_LED_EVENT_TX);
netif_wake_queue(ndev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
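
The TXOK/TXFEMP bookkeeping above reduces to a small decision that can be modelled and tested in userspace; the helper below is an illustration, not the driver's code. TXOK only proves that at least one frame completed, so with two frames in flight the FIFO-empty flag is what tells one completion from two.

#include <assert.h>
#include <stdio.h>

/* How many frames completed, given the FIFO occupancy at IRQ time and
 * whether the FIFO-empty flag was set after clearing all TXOK bits. */
static unsigned int frames_completed(unsigned int in_fifo, int fifo_empty)
{
    if (in_fifo > 1 && fifo_empty)
        return in_fifo;    /* everything drained: all were sent */
    return 1;              /* TXOK guarantees at least one */
}

int main(void)
{
    assert(frames_completed(1, 1) == 1);
    assert(frames_completed(2, 0) == 1); /* second frame still in flight */
    assert(frames_completed(2, 1) == 2); /* FIFO drained: both sent */
    printf("ok\n");
    return 0;
}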
/**
@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
u32 isr_errors;
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);
/* Check for the type of error interrupt and Processing it */
if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
XCAN_IXR_ARBLST_MASK));
isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
if (isr & XCAN_IXR_RXNEMP_MASK) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
ier &= ~XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
u32 ier;
/* Disable interrupts and leave the can in configuration mode */
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
ier &= ~XCAN_INTR_ALL;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
set_reset_mode(ndev);
priv->can.state = CAN_STATE_STOPPED;
}
@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
*/
static int __maybe_unused xcan_suspend(struct device *dev)
{
if (!device_may_wakeup(dev))
return pm_runtime_force_suspend(dev);
struct net_device *ndev = dev_get_drvdata(dev);
return 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
xcan_chip_stop(ndev);
}
return pm_runtime_force_suspend(dev);
}
/**
@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
*/
static int __maybe_unused xcan_resume(struct device *dev)
{
if (!device_may_wakeup(dev))
return pm_runtime_force_resume(dev);
struct net_device *ndev = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret) {
dev_err(dev, "pm_runtime_force_resume failed on resume\n");
return ret;
}
if (netif_running(ndev)) {
ret = xcan_chip_start(ndev);
if (ret) {
dev_err(dev, "xcan_chip_start failed on resume\n");
return ret;
}
netif_device_attach(ndev);
netif_start_queue(ndev);
}
return 0;
}
/**
@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
}
priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
priv->can.state = CAN_STATE_SLEEPING;
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);
@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
u32 isr, status;
ret = clk_prepare_enable(priv->bus_clk);
if (ret) {
@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
return ret;
}
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (netif_running(ndev)) {
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
priv->write_reg(priv, XCAN_SRR_OFFSET,
XCAN_SRR_RESET_MASK);
} else if ((status & XCAN_SR_ESTAT_MASK) ==
XCAN_SR_ESTAT_MASK) {
priv->can.state = CAN_STATE_ERROR_PASSIVE;
} else if (status & XCAN_SR_ERRWRN_MASK) {
priv->can.state = CAN_STATE_ERROR_WARNING;
} else {
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
netif_device_attach(ndev);
netif_start_queue(ndev);
}
return 0;
}
@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
static const struct xcan_devtype_data xcan_zynq_data = {
.caps = XCAN_CAP_WATERMARK,
};
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
{ .compatible = "xlnx,axi-can-1.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
const struct of_device_id *of_id;
int caps = 0;
void __iomem *addr;
int ret, rx_max, tx_max;
int ret, rx_max, tx_max, tx_fifo_depth;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
&tx_fifo_depth);
if (ret < 0)
goto err;
@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0)
goto err;
of_id = of_match_device(xcan_of_match, &pdev->dev);
if (of_id) {
const struct xcan_devtype_data *devtype_data = of_id->data;
if (devtype_data)
caps = devtype_data->caps;
}
/* There is no way to directly figure out how many frames have been
* sent when the TXOK interrupt is processed. If watermark programming
* is supported, we can have 2 frames in the FIFO and use TXFEMP
* to determine if 1 or 2 frames have been sent.
* Theoretically we should be able to use TXFWMEMP to determine up
* to 3 frames, but it seems that after putting a second frame in the
* FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
* than 2 frames in FIFO) is set anyway with no TXOK (a frame was
* sent), which is not a sensible state - possibly TXFWMEMP is not
* completely synchronized with the rest of the bits?
*/
if (caps & XCAN_CAP_WATERMARK)
tx_max = min(tx_fifo_depth, 2);
else
tx_max = 1;
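
Restated as standalone C, with an assumed capability flag value: watermark support buys exactly one extra in-flight frame, because TXFEMP can only distinguish empty from not empty.

#include <stdio.h>

#define CAP_WATERMARK 0x1 /* assumed flag value for this sketch */

static int pick_tx_max(int caps, int tx_fifo_depth)
{
    /* Two frames in flight only when TXFEMP can disambiguate them. */
    if (caps & CAP_WATERMARK)
        return tx_fifo_depth < 2 ? tx_fifo_depth : 2;
    return 1;
}

int main(void)
{
    printf("%d\n", pick_tx_max(CAP_WATERMARK, 64)); /* 2 */
    printf("%d\n", pick_tx_max(0, 64));             /* 1 */
    return 0;
}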
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
if (!ndev)
@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
priv->reg_base, ndev->irq, priv->can.clock.freq,
priv->tx_max);
tx_fifo_depth, priv->tx_max);
return 0;
@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
{ .compatible = "xlnx,zynq-can-1.0", },
{ .compatible = "xlnx,axi-can-1.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,


@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
/* To be called with reg_lock held */
static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
{
mv88e6xxx_g1_irq_free_common(chip);
/*
* free_irq must be called without reg_lock taken because the irq
* handler takes this lock, too.
*/
free_irq(chip->irq, chip);
mutex_lock(&chip->reg_lock);
mv88e6xxx_g1_irq_free_common(chip);
mutex_unlock(&chip->reg_lock);
}
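
The comment encodes a general rule: never wait for a handler to finish while holding a lock the handler itself takes. A pthreads analogue of the fixed ordering (all names invented, compile with -pthread), where pthread_join() plays the role of free_irq():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int stop;

static void *irq_handler(void *arg)
{
    (void)arg;
    while (!atomic_load(&stop)) {
        pthread_mutex_lock(&reg_lock);   /* the handler takes reg_lock */
        /* ... access registers ... */
        pthread_mutex_unlock(&reg_lock);
    }
    return NULL;
}

int main(void)
{
    pthread_t handler;

    pthread_create(&handler, NULL, irq_handler, NULL);

    /* Teardown in the fixed order: wait for the handler *before*
     * taking the lock it uses; joining (like free_irq()) while
     * holding reg_lock could deadlock. */
    atomic_store(&stop, 1);
    pthread_join(handler, NULL);

    pthread_mutex_lock(&reg_lock);
    /* ... unmap interrupts under the lock ... */
    pthread_mutex_unlock(&reg_lock);

    printf("torn down without deadlock\n");
    return 0;
}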
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
{
mv88e6xxx_g1_irq_free_common(chip);
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
mutex_lock(&chip->reg_lock);
mv88e6xxx_g1_irq_free_common(chip);
mutex_unlock(&chip->reg_lock);
}
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@ -4506,12 +4515,10 @@ out_g2_irq:
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
mutex_unlock(&chip->reg_lock);
out:
if (pdata)
dev_put(pdata->netdev);
@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
mutex_unlock(&chip->reg_lock);
}
static const struct of_device_id mv88e6xxx_of_match[] = {


@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
depends on ISA && ISA_DMA_API
depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.


@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
depends on ISA && ISA_DMA_API && !ARM
depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
depends on ISA && ISA_DMA_API && !ARM
depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.


@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;


@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
if (bp->state == BNX2X_STATE_OPEN)
return bnx2x_rss(bp, &bp->rss_conf_obj, false,
true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
if (bp->state == BNX2X_STATE_OPEN)
return bnx2x_rss(bp, &bp->rss_conf_obj, false,
true);
}
return 0;
@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_eth(bp, false);
if (bp->state == BNX2X_STATE_OPEN)
return bnx2x_config_rss_eth(bp, false);
return 0;
}
/**


@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file


@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
txq->txq_stats.tx_busy++;
u64_stats_update_end(&txq->txq_stats.syncp);
err = NETDEV_TX_BUSY;
wqe_size = 0;
goto flush_skbs;
}


@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)


@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
int i;
buf->size = size;
buf->npages = 1 << get_order(size);
buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
buf->page_shift = PAGE_SHIFT;
buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
GFP_KERNEL);
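
The one-line fix matters because 1 << get_order(size) rounds up to a power-of-two page count, while the fragment array really spans DIV_ROUND_UP(size, PAGE_SIZE) pages. A self-contained comparison, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Smallest order such that (PAGE_SIZE << order) >= size, like get_order(). */
static unsigned int get_order(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long size = 5 * PAGE_SIZE; /* a 20 KiB buffer */

    /* Rounds up to a power of two: 8 pages... */
    printf("power-of-two pages: %lu\n", 1UL << get_order(size));
    /* ...while the buffer really only spans 5 pages. */
    printf("actual pages:       %lu\n", DIV_ROUND_UP(size, PAGE_SIZE));
    return 0;
}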


@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;


@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets)
struct ieee_ets *ets,
bool zero_sum_allowed)
{
bool have_ets_tc = false;
int bw_sum = 0;
@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
if (have_ets_tc && bw_sum != 100) {
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
if (bw_sum || (!bw_sum && !zero_sum_allowed))
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
err = mlx5e_dbcnl_validate_ets(netdev, ets);
err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
if (err)
return err;
@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets);
if (err) {
netdev_err(netdev,
"%s, Failed to validate ETS: %d\n", __func__, err);
err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
if (err)
goto out;
}
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err) {


@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
else
actions = flow->nic_attr->action;
if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
!(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec, exts);


@ -2216,6 +2216,6 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
return esw->mode;
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);


@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
if (dest)
if (dest_num)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);


@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
u64 overflow_cycles;
u64 ns;
u64 frac = 0;
u32 dev_freq;
@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
* The period is calculated as the minimum between max HW cycles count
* (The clock source mask) and max amount of cycles that can be
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
do_div(ns, NSEC_PER_SEC / 2 / HZ);
do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
mdev->clock_info_page = alloc_page(GFP_KERNEL);
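
A worked example of the overflow arithmetic, with assumed clocksource parameters (a 64-bit counter where mult and shift make one cycle equal one nanosecond); the cap keeps cycles * mult inside 64 bits and the watchdog period under half a counter wrap:

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100ULL /* assumed scheduler tick for this sketch */

/* Simplified cyclecounter_cyc2ns(): ns = cycles * mult >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
    return (cycles * mult) >> shift;
}

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
    uint64_t mask = ~0ULL;               /* full 64-bit counter */
    uint32_t mult = 1U << 31, shift = 31; /* 1 cycle == 1 ns */

    /* Largest cycle delta whose cycles * mult product still fits in
     * 63 bits, further capped at half the counter's wrap range. */
    uint64_t overflow_cycles = min_u64((~0ULL >> 1) / mult, mask >> 1);
    uint64_t ns = cyc2ns(overflow_cycles, mult, shift);
    uint64_t period_ticks = ns / (NSEC_PER_SEC / HZ);

    printf("check the counter every %" PRIu64 " ns (%" PRIu64 " ticks)\n",
           ns, period_ticks);
    return 0;
}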


@ -113,35 +113,45 @@ err_db_free:
return err;
}
static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
struct mlx5_wq_qp *qp)
static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
struct mlx5_wq_qp *qp)
{
struct mlx5_frag_buf_ctrl *sq_fbc;
struct mlx5_frag_buf *rqb, *sqb;
rqb = &qp->rq.fbc.frag_buf;
*rqb = *buf;
rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
rqb->npages = 1 << get_order(rqb->size);
rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
sqb = &qp->sq.fbc.frag_buf;
*sqb = *buf;
sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
sqb->npages = 1 << get_order(sqb->size);
sq_fbc = &qp->sq.fbc;
sqb = &sq_fbc->frag_buf;
*sqb = *buf;
sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
sqb->frags += rqb->npages; /* first part is for the rq */
if (sq_fbc->strides_offset)
sqb->frags--;
}
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
u32 sq_strides_offset;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
MLX5_GET(qpc, qpc, log_sq_size),
&wq->sq.fbc);
sq_strides_offset =
((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
MLX5_GET(qpc, qpc, log_sq_size),
sq_strides_offset,
&wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];


@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.dst_ipv4 = flow->daddr;
/* If entry has expired send dst IP with all other fields 0. */
if (!(neigh->nud_state & NUD_VALID)) {
if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);


@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
u32 *p_bins = (u32 *)p_params->bins;
u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
memset(bins, 0, sizeof(unsigned long) *
ETH_MULTICAST_MAC_BINS_IN_REGS);
memset(bins, 0, sizeof(bins));
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport
*/
if (p_filter_cmd->opcode == QED_FILTER_ADD) {
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
u32 bit;
u32 bit, nbits;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
__set_bit(bit, bins);
nbits = sizeof(u32) * BITS_PER_BYTE;
bins[bit / nbits] |= 1 << (bit % nbits);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
}
}
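
The switch from unsigned long bitmaps to u32 words is about layout, not logic: explicit index/shift math on fixed-width words produces the same byte image on 32-bit, 64-bit, little- and big-endian hosts, which __set_bit() on a long-sized bitmap does not guarantee. A standalone sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BINS_WORDS   8
#define BITS_PER_U32 32

/* Set/test an approximate-multicast bin in an array of 32-bit words. */
static void bin_set(uint32_t *bins, unsigned int bit)
{
    bins[bit / BITS_PER_U32] |= 1U << (bit % BITS_PER_U32);
}

static int bin_test(const uint32_t *bins, unsigned int bit)
{
    return (bins[bit / BITS_PER_U32] >> (bit % BITS_PER_U32)) & 1;
}

int main(void)
{
    uint32_t bins[BINS_WORDS] = { 0 };
    unsigned int bit = 200; /* bin index as hashed from a multicast MAC */

    bin_set(bins, bit);
    printf("word %u = 0x%08" PRIx32 ", set: %d\n",
           bit / BITS_PER_U32, bins[bit / BITS_PER_U32],
           bin_test(bins, bit));
    return 0;
}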


@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
u32 bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;

View File

@ -1211,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
p_link->link_up = 0;
}
if (p_link->link_up && p_link->speed)
@ -1308,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
if (params->eee.enable)
phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
/* There are MFWs that share this capability regardless of whether
* this is feasible or not. And given that at the very least adv_caps
* would be set internally by qed, we want to make sure LFA would
* still work.
*/
if ((p_hwfn->mcp_info->capabilities &
FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & QED_EEE_1G_ADV)


@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
memcpy(p_data->bins, p_mcast_tlv->bins,
sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}


@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
u32 bit;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
__set_bit(bit, sp_params.bins);
sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}


@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
u64 bins[8];
/* There are only 256 approx bins, and in HSI they're divided into
* 32-bit values. As old VFs used to set-bit to the values on its side,
* the upper half of the array is never expected to contain any data.
*/
u64 bins[4];
u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {


@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
/* override BIOS settings, use userspace tools to enable WOL */
__rtl8169_set_wol(tp, 0);
tp->saved_wolopts = __rtl8169_get_wol(tp);
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;


@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
lp->mii_bus = NULL;
return ret;
}
return 0;


@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;


@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */


@ -636,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Add new entry to forwarding table -- assumes lock held */
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
const u8 *mac, __u16 state,
__be32 src_vni, __u8 ndm_flags)
{
struct vxlan_fdb *f;
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
f->vni = src_vni;
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
return f;
}
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
__be32 vni, __u32 ifindex, __u8 ndm_flags,
struct vxlan_fdb **fdb)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int rc;
if (vxlan->cfg.addrmax &&
vxlan->addrcnt >= vxlan->cfg.addrmax)
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0) {
kfree(f);
return rc;
}
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac, src_vni));
*fdb = f;
return 0;
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
if (vxlan->cfg.addrmax &&
vxlan->addrcnt >= vxlan->cfg.addrmax)
return -ENOSPC;
/* Disallow replace to add a multicast entry */
if ((flags & NLM_F_REPLACE) &&
(is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
return -EOPNOTSUPP;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return -ENOMEM;
notify = 1;
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
f->vni = src_vni;
INIT_LIST_HEAD(&f->remotes);
memcpy(f->eth_addr, mac, ETH_ALEN);
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0) {
kfree(f);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
vni, ifindex, ndm_flags, &f);
if (rc < 0)
return rc;
}
++vxlan->addrcnt;
hlist_add_head_rcu(&f->hlist,
vxlan_fdb_head(vxlan, mac, src_vni));
notify = 1;
}
if (notify) {
@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify)
{
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
if (do_notify)
vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EAFNOSUPPORT;
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
goto out;
}
vxlan_fdb_destroy(vxlan, f);
vxlan_fdb_destroy(vxlan, f, true);
out:
return 0;
@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
vxlan_fdb_create(vxlan, src_mac, src_ip,
vxlan_fdb_update(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->cfg.dst_port,
@ -2364,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f);
vxlan_fdb_destroy(vxlan, f, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
@ -2415,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f);
vxlan_fdb_destroy(vxlan, f, true);
spin_unlock_bh(&vxlan->hash_lock);
}
@ -2469,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f);
vxlan_fdb_destroy(vxlan, f, true);
}
}
spin_unlock_bh(&vxlan->hash_lock);
@ -3160,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f = NULL;
int err;
err = vxlan_dev_configure(net, dev, conf, false, extack);
@ -3173,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
NTF_SELF);
NTF_SELF, &f);
if (err)
return err;
}
err = register_netdevice(dev);
if (err)
goto errout;
err = rtnl_configure_link(dev, NULL);
if (err) {
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
return err;
unregister_netdevice(dev);
goto errout;
}
/* notify default fdb entry */
if (f)
vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
errout:
if (f)
vxlan_fdb_destroy(vxlan, f, false);
return err;
}
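
The fdb create/notify split follows a common rollback pattern: build the entry silently, send RTM_NEWNEIGH only after every later step has succeeded, and destroy silently on failure so userspace never sees a delete for an entry it was never told about. A toy model with invented names:

#include <stdio.h>
#include <stdlib.h>

struct fdb_entry { const char *mac; };

/* Create the default entry without telling userspace about it yet. */
static struct fdb_entry *fdb_create(const char *mac)
{
    struct fdb_entry *f = malloc(sizeof(*f));

    if (f)
        f->mac = mac;
    return f;
}

static void fdb_destroy(struct fdb_entry *f, int do_notify)
{
    if (do_notify)
        printf("RTM_DELNEIGH %s\n", f->mac);
    free(f);
}

static int register_device(void) { return -1; /* simulate failure */ }

int main(void)
{
    struct fdb_entry *f = fdb_create("00:00:00:00:00:00");

    if (!f)
        return 1;
    if (register_device() < 0) {
        /* Roll back silently: userspace never saw RTM_NEWNEIGH,
         * so it must not see RTM_DELNEIGH either. */
        fdb_destroy(f, 0);
        return 1;
    }
    printf("RTM_NEWNEIGH %s\n", f->mac); /* notify only on full success */
    fdb_destroy(f, 1);
    return 0;
}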
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@ -3425,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct vxlan_rdst *dst = &vxlan->default_dst;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
@ -3453,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_CREATE | NLM_F_APPEND,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
NTF_SELF);
NTF_SELF, &f);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
}
spin_unlock_bh(&vxlan->hash_lock);
}


@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
/*
* For something we're not in a state to send to the device the default action
* is to busy it and retry it after the controller state is recovered. However,
* anything marked for failfast or nvme multipath is immediately failed.
* if the controller is deleting or if anything is marked for failfast or
* nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
blk_status_t nvmf_fail_nonready_command(struct request *rq)
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
if (ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
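
The same decision as a small table in standalone C (state names invented): requeueing is only useful while recovery can still happen.

#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING, CTRL_DEAD };
enum verdict { REQUEUE, FAIL };

/* A request arrived while the queue is not ready: requeue by default,
 * but fail at once when the controller is going away or the request
 * must not be retried. */
static enum verdict nonready_verdict(enum ctrl_state state,
                                     int noretry, int multipath)
{
    if (state != CTRL_DELETING && state != CTRL_DEAD &&
        !noretry && !multipath)
        return REQUEUE;
    return FAIL;
}

int main(void)
{
    /* A reset in progress: ordinary I/O waits for recovery. */
    printf("%d\n", nonready_verdict(CTRL_RESETTING, 0, 0)); /* 0: REQUEUE */
    /* Deletion in progress: nothing will ever become ready again. */
    printf("%d\n", nonready_verdict(CTRL_DELETING, 0, 0));  /* 1: FAIL */
    return 0;
}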


@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
blk_status_t nvmf_fail_nonready_command(struct request *rq);
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);


@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(rq);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)


@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(rq);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,


@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
{
struct nvmet_ns *ns = to_nvmet_ns(item);
struct nvmet_subsys *subsys = ns->subsys;
size_t len;
int ret;
mutex_lock(&subsys->lock);
@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
if (ns->enabled)
goto out_unlock;
kfree(ns->device_path);
ret = -EINVAL;
len = strcspn(page, "\n");
if (!len)
goto out_unlock;
kfree(ns->device_path);
ret = -ENOMEM;
ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
ns->device_path = kstrndup(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
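
The pattern here, validate the input before freeing the old value, then duplicate only up to the first newline, can be sketched in userspace with POSIX strndup():

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a sysfs/configfs-style input up to the first newline,
 * rejecting an empty value before anything is freed or replaced. */
static char *dup_first_line(const char *page)
{
    size_t len = strcspn(page, "\n");

    if (!len)
        return NULL; /* maps to -EINVAL in the driver */
    return strndup(page, len);
}

int main(void)
{
    char *path = dup_first_line("/dev/nvme0n1\n");

    printf("%s\n", path ? path : "(rejected)");
    printf("%s\n", dup_first_line("\n") ? "accepted" : "(rejected)");
    free(path);
    return 0;
}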


@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
if (ret)
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
if (ret)
goto out_unlock;


@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,
@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_cmd_iu cmdiubuf;
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
struct scatterlist *next_sg;
struct scatterlist *data_sg;
int data_sg_cnt;
u32 offset;
@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
template->max_sgl_segments);
newrec->max_sg_cnt = template->max_sgl_segments;
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
/* note: write from initiator perspective */
fod->next_sg = fod->data_sg;
return 0;
@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct scatterlist *sg = fod->next_sg;
unsigned long flags;
u32 tlen;
u32 remaininglen = fod->req.transfer_len - fod->offset;
u32 tlen = 0;
int ret;
fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
(fod->req.transfer_len - fod->offset));
/*
* for next sequence:
* break at a sg element boundary
* attempt to keep sequence length capped at
* NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
* be longer if a single sg element is larger
* than that amount. This is done to avoid creating
* a new sg list to use for the tgtport api.
*/
fcpreq->sg = sg;
fcpreq->sg_cnt = 0;
while (tlen < remaininglen &&
fcpreq->sg_cnt < tgtport->max_sg_cnt &&
tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
fcpreq->sg_cnt++;
tlen += sg_dma_len(sg);
sg = sg_next(sg);
}
if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
fcpreq->sg_cnt++;
tlen += min_t(u32, sg_dma_len(sg), remaininglen);
sg = sg_next(sg);
}
if (tlen < remaininglen)
fod->next_sg = sg;
else
fod->next_sg = NULL;
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;
fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
/*
* If the last READDATA request: check if LLDD supports
* combined xfr with response.
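
A runnable model of the sequence-splitting loop above, with assumed element sizes and an assumed LLDD sg-count limit; note how an element larger than the cap still goes out whole rather than forcing a new sg list:

#include <stdio.h>

#define MAX_SEQ_LEN (256 * 1024) /* per-sequence cap, as in the driver */
#define MAX_SG_CNT  4            /* assumed LLDD limit for this sketch */

int main(void)
{
    /* Assumed DMA-mapped sg element lengths, in bytes. */
    unsigned int sg_len[] = { 64 * 1024, 128 * 1024, 512 * 1024, 64 * 1024 };
    unsigned int nents = 4, i = 0, seq = 0;

    while (i < nents) {
        unsigned int tlen = 0, cnt = 0;

        /* Accumulate whole sg elements while under both caps. */
        while (i < nents && cnt < MAX_SG_CNT &&
               tlen + sg_len[i] < MAX_SEQ_LEN) {
            tlen += sg_len[i++];
            cnt++;
        }
        /* A single oversized element is sent whole in its own
         * sequence instead of being split. */
        if (cnt == 0) {
            tlen = sg_len[i++];
            cnt = 1;
        }
        printf("sequence %u: %u elements, %u bytes\n", seq++, cnt, tlen);
    }
    return 0;
}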


@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
return nvmf_fail_nonready_command(req);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)


@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
parent = udev->subordinate;
pci_lock_rescan_remove();
pci_dev_get(dev);
list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
bus_list) {
pci_dev_get(pdev);
@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
pci_info(dev, "Device recovery from fatal error failed\n");
}
pci_dev_put(dev);
pci_unlock_rescan_remove();
}


@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
{
void __iomem *ctrl = params->ctrl_regs;
USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
/* 1 millisecond - for USB clocks to settle down */
usleep_range(1000, 2000);
if (BRCM_ID(params->family_id) == 0x7366) {
/*
* The PHY3_SOFT_RESETB bits default to the wrong state.


@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
values);
if (error)
return;
for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
val |= values[i] << i;
dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
__func__, i, values[i], val);
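
The fix makes the loop bound match the GPIO array actually read. The packing itself is one value per bit, line 0 in the least significant position; a tiny standalone version (the line count is assumed):

#include <stdio.h>

#define NR_STATUS_LINES 3 /* the modem reports status on three GPIOs */

/* Pack per-line GPIO values (0/1) into a single status word. */
static unsigned int pack_lines(const int *values, unsigned int n)
{
    unsigned int val = 0, i;

    for (i = 0; i < n; i++)
        val |= (unsigned int)values[i] << i;
    return val;
}

int main(void)
{
    int values[NR_STATUS_LINES] = { 1, 0, 1 }; /* as read from the GPIOs */

    printf("status = %u\n", pack_lines(values, NR_STATUS_LINES)); /* 5 */
    return 0;
}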

Some files were not shown because too many files have changed in this diff.