Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor conflict with the DSA legacy code removal.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a9e41a5296
@@ -38,7 +38,7 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
 * "smii"
 * "xgmii"
 * "trgmii"
-* "2000base-x",
+* "1000base-x",
 * "2500base-x",
 * "rxaui"
 * "xaui"
@@ -117,6 +117,8 @@ static bool is_simm32(s64 value)
 #define IA32_JLE 0x7E
 #define IA32_JG 0x7F

+#define COND_JMP_OPCODE_INVALID	(0xFF)
+
 /*
  * Map eBPF registers to IA32 32bit registers or stack scratch space.
  *
@@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
 		      STACK_VAR(dst_hi));
 	}

-	/* xor ecx,ecx */
-	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
-	/* sub dreg_lo,ecx */
-	EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
-	/* mov dreg_lo,ecx */
-	EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
-
-	/* xor ecx,ecx */
-	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
-	/* sbb dreg_hi,ecx */
-	EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
-	/* mov dreg_hi,ecx */
-	EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
+	/* neg dreg_lo */
+	EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
+	/* adc dreg_hi,0x0 */
+	EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
+	/* neg dreg_hi */
+	EMIT2(0xF7, add_1reg(0xD8, dreg_hi));

 	if (dstk) {
 		/* mov dword ptr [ebp+off],dreg_lo */
@@ -1613,6 +1608,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
 	*pprog = prog;
 }

+static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
+{
+	u8 jmp_cond;
+
+	/* Convert BPF opcode to x86 */
+	switch (op) {
+	case BPF_JEQ:
+		jmp_cond = IA32_JE;
+		break;
+	case BPF_JSET:
+	case BPF_JNE:
+		jmp_cond = IA32_JNE;
+		break;
+	case BPF_JGT:
+		/* GT is unsigned '>', JA in x86 */
+		jmp_cond = IA32_JA;
+		break;
+	case BPF_JLT:
+		/* LT is unsigned '<', JB in x86 */
+		jmp_cond = IA32_JB;
+		break;
+	case BPF_JGE:
+		/* GE is unsigned '>=', JAE in x86 */
+		jmp_cond = IA32_JAE;
+		break;
+	case BPF_JLE:
+		/* LE is unsigned '<=', JBE in x86 */
+		jmp_cond = IA32_JBE;
+		break;
+	case BPF_JSGT:
+		if (!is_cmp_lo)
+			/* Signed '>', GT in x86 */
+			jmp_cond = IA32_JG;
+		else
+			/* GT is unsigned '>', JA in x86 */
+			jmp_cond = IA32_JA;
+		break;
+	case BPF_JSLT:
+		if (!is_cmp_lo)
+			/* Signed '<', LT in x86 */
+			jmp_cond = IA32_JL;
+		else
+			/* LT is unsigned '<', JB in x86 */
+			jmp_cond = IA32_JB;
+		break;
+	case BPF_JSGE:
+		if (!is_cmp_lo)
+			/* Signed '>=', GE in x86 */
+			jmp_cond = IA32_JGE;
+		else
+			/* GE is unsigned '>=', JAE in x86 */
+			jmp_cond = IA32_JAE;
+		break;
+	case BPF_JSLE:
+		if (!is_cmp_lo)
+			/* Signed '<=', LE in x86 */
+			jmp_cond = IA32_JLE;
+		else
+			/* LE is unsigned '<=', JBE in x86 */
+			jmp_cond = IA32_JBE;
+		break;
+	default: /* to silence GCC warning */
+		jmp_cond = COND_JMP_OPCODE_INVALID;
+		break;
+	}
+
+	return jmp_cond;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
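The is_cmp_lo flag exists because a signed 64-bit comparison on a 32-bit target splits into a signed comparison of the high words and an unsigned comparison of the low words. A minimal standalone C sketch of that decomposition (illustrative only, not kernel code; the helper name is made up):

#include <stdbool.h>
#include <stdint.h>

/* Signed 64-bit '>' built from 32-bit halves: the high words decide with a
 * signed compare; only when they are equal do the low words decide, and then
 * with an unsigned compare. This is why get_cond_jmp_opcode() hands back the
 * unsigned condition (e.g. JA instead of JG) when is_cmp_lo is true. */
static bool sgt64_by_halves(int32_t a_hi, uint32_t a_lo,
			    int32_t b_hi, uint32_t b_lo)
{
	if (a_hi != b_hi)
		return a_hi > b_hi;	/* signed, like is_cmp_lo == false */
	return a_lo > b_lo;		/* unsigned, like is_cmp_lo == true */
}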
@@ -2069,10 +2133,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	case BPF_JMP | BPF_JLT | BPF_X:
 	case BPF_JMP | BPF_JGE | BPF_X:
 	case BPF_JMP | BPF_JLE | BPF_X:
-	case BPF_JMP | BPF_JSGT | BPF_X:
-	case BPF_JMP | BPF_JSLE | BPF_X:
-	case BPF_JMP | BPF_JSLT | BPF_X:
-	case BPF_JMP | BPF_JSGE | BPF_X:
 	case BPF_JMP32 | BPF_JEQ | BPF_X:
 	case BPF_JMP32 | BPF_JNE | BPF_X:
 	case BPF_JMP32 | BPF_JGT | BPF_X:
@@ -2118,6 +2178,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
 		goto emit_cond_jmp;
 	}
+	case BPF_JMP | BPF_JSGT | BPF_X:
+	case BPF_JMP | BPF_JSLE | BPF_X:
+	case BPF_JMP | BPF_JSLT | BPF_X:
+	case BPF_JMP | BPF_JSGE | BPF_X: {
+		u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+		u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+		u8 sreg_lo = sstk ? IA32_ECX : src_lo;
+		u8 sreg_hi = sstk ? IA32_EBX : src_hi;
+
+		if (dstk) {
+			EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+			      STACK_VAR(dst_lo));
+			EMIT3(0x8B,
+			      add_2reg(0x40, IA32_EBP,
+				       IA32_EDX),
+			      STACK_VAR(dst_hi));
+		}
+
+		if (sstk) {
+			EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
+			      STACK_VAR(src_lo));
+			EMIT3(0x8B,
+			      add_2reg(0x40, IA32_EBP,
+				       IA32_EBX),
+			      STACK_VAR(src_hi));
+		}
+
+		/* cmp dreg_hi,sreg_hi */
+		EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+		EMIT2(IA32_JNE, 10);
+		/* cmp dreg_lo,sreg_lo */
+		EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+		goto emit_cond_jmp_signed;
+	}
 	case BPF_JMP | BPF_JSET | BPF_X:
 	case BPF_JMP32 | BPF_JSET | BPF_X: {
 		bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
@@ -2194,10 +2288,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	case BPF_JMP | BPF_JLT | BPF_K:
 	case BPF_JMP | BPF_JGE | BPF_K:
 	case BPF_JMP | BPF_JLE | BPF_K:
-	case BPF_JMP | BPF_JSGT | BPF_K:
-	case BPF_JMP | BPF_JSLE | BPF_K:
-	case BPF_JMP | BPF_JSLT | BPF_K:
-	case BPF_JMP | BPF_JSGE | BPF_K:
 	case BPF_JMP32 | BPF_JEQ | BPF_K:
 	case BPF_JMP32 | BPF_JNE | BPF_K:
 	case BPF_JMP32 | BPF_JGT | BPF_K:
@@ -2238,50 +2328,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		/* cmp dreg_lo,sreg_lo */
 		EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));

-emit_cond_jmp:		/* Convert BPF opcode to x86 */
-		switch (BPF_OP(code)) {
-		case BPF_JEQ:
-			jmp_cond = IA32_JE;
-			break;
-		case BPF_JSET:
-		case BPF_JNE:
-			jmp_cond = IA32_JNE;
-			break;
-		case BPF_JGT:
-			/* GT is unsigned '>', JA in x86 */
-			jmp_cond = IA32_JA;
-			break;
-		case BPF_JLT:
-			/* LT is unsigned '<', JB in x86 */
-			jmp_cond = IA32_JB;
-			break;
-		case BPF_JGE:
-			/* GE is unsigned '>=', JAE in x86 */
-			jmp_cond = IA32_JAE;
-			break;
-		case BPF_JLE:
-			/* LE is unsigned '<=', JBE in x86 */
-			jmp_cond = IA32_JBE;
-			break;
-		case BPF_JSGT:
-			/* Signed '>', GT in x86 */
-			jmp_cond = IA32_JG;
-			break;
-		case BPF_JSLT:
-			/* Signed '<', LT in x86 */
-			jmp_cond = IA32_JL;
-			break;
-		case BPF_JSGE:
-			/* Signed '>=', GE in x86 */
-			jmp_cond = IA32_JGE;
-			break;
-		case BPF_JSLE:
-			/* Signed '<=', LE in x86 */
-			jmp_cond = IA32_JLE;
-			break;
-		default: /* to silence GCC warning */
+emit_cond_jmp:	jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+		if (jmp_cond == COND_JMP_OPCODE_INVALID)
 			return -EFAULT;
-		}
 		jmp_offset = addrs[i + insn->off] - addrs[i];
 		if (is_imm8(jmp_offset)) {
 			EMIT2(jmp_cond, jmp_offset);
@@ -2291,7 +2340,66 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
 			pr_err("cond_jmp gen bug %llx\n", jmp_offset);
 			return -EFAULT;
 		}
 		break;
 	}
+	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP | BPF_JSLE | BPF_K:
+	case BPF_JMP | BPF_JSLT | BPF_K:
+	case BPF_JMP | BPF_JSGE | BPF_K: {
+		u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+		u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+		u8 sreg_lo = IA32_ECX;
+		u8 sreg_hi = IA32_EBX;
+		u32 hi;
+
+		if (dstk) {
+			EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+			      STACK_VAR(dst_lo));
+			EMIT3(0x8B,
+			      add_2reg(0x40, IA32_EBP,
+				       IA32_EDX),
+			      STACK_VAR(dst_hi));
+		}
+
+		/* mov ecx,imm32 */
+		EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
+		hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+		/* mov ebx,imm32 */
+		EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+		/* cmp dreg_hi,sreg_hi */
+		EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+		EMIT2(IA32_JNE, 10);
+		/* cmp dreg_lo,sreg_lo */
+		EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+
+		/*
+		 * For simplicity of branch offset computation,
+		 * let's use fixed jump coding here.
+		 */
+emit_cond_jmp_signed:	/* Check the condition for low 32-bit comparison */
+		jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
+		if (jmp_cond == COND_JMP_OPCODE_INVALID)
+			return -EFAULT;
+		jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
+		if (is_simm32(jmp_offset)) {
+			EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+		} else {
+			pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+			return -EFAULT;
+		}
+		EMIT2(0xEB, 6);
+
+		/* Check the condition for high 32-bit comparison */
+		jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+		if (jmp_cond == COND_JMP_OPCODE_INVALID)
+			return -EFAULT;
+		jmp_offset = addrs[i + insn->off] - addrs[i];
+		if (is_simm32(jmp_offset)) {
+			EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+		} else {
+			pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+			return -EFAULT;
+		}
+		break;
+	}
 	case BPF_JMP | BPF_JA:
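For readers following the raw opcodes above, here is a sketch of the fixed-size code this path emits for one signed 64-bit conditional jump (an illustration reconstructed from the EMIT calls, not actual JIT output; byte sizes follow the standard IA-32 encodings, 2 bytes for a short Jcc/JMP and 6 bytes for a near 0x0F 0x8x Jcc):

	cmp	dreg_hi, sreg_hi	; 2 bytes
	jne	.Lcheck_hi		; EMIT2(IA32_JNE, 10): skip 2+6+2 bytes
	cmp	dreg_lo, sreg_lo	; 2 bytes
	jcc32	target			; unsigned condition, offset biased by +8
	jmp	.Ldone			; EMIT2(0xEB, 6): skip the 6-byte Jcc below
.Lcheck_hi:
	jcc32	target			; signed condition, plain offset
.Ldone:

The +0x10 added to jmp_cond selects the near form of the same condition (short Jcc opcodes are 0x70+cc, near Jcc opcodes are 0x0F 0x80+cc), and the +8 bias on the low-word jump accounts for the 8 bytes of code (the 2-byte jmp plus the 6-byte high-word Jcc) that still follow it before the end of this instruction's output.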
@@ -958,6 +958,7 @@ static void write_iso_callback(struct urb *urb)
  */
 static int starturbs(struct bc_state *bcs)
 {
+	struct usb_device *udev = bcs->cs->hw.bas->udev;
 	struct bas_bc_state *ubc = bcs->hw.bas;
 	struct urb *urb;
 	int j, k;
@@ -975,8 +976,8 @@ static int starturbs(struct bc_state *bcs)
 			rc = -EFAULT;
 			goto error;
 		}
-		usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
-				 usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+		usb_fill_int_urb(urb, udev,
+				 usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
 				 ubc->isoinbuf + k * BAS_INBUFSIZE,
 				 BAS_INBUFSIZE, read_iso_callback, bcs,
 				 BAS_FRAMETIME);
@@ -1006,8 +1007,8 @@ static int starturbs(struct bc_state *bcs)
 			rc = -EFAULT;
 			goto error;
 		}
-		usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
-				 usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+		usb_fill_int_urb(urb, udev,
+				 usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
 				 ubc->isooutbuf->data,
 				 sizeof(ubc->isooutbuf->data),
 				 write_iso_callback, &ubc->isoouturbs[k],
@@ -731,7 +731,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 	if (rc)
 		pr_err("Cannot set LLQ configuration: %d\n", rc);

-	return 0;
+	return rc;
 }

 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
@@ -2195,7 +2195,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 	if (unlikely(ret))
 		return ret;

-	if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
 		pr_err("Func hash %d isn't supported by device, abort\n",
 		       rss->hash_func);
 		return -EOPNOTSUPP;
@@ -2280,6 +2280,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 		return -EINVAL;
 	}

+	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);

 	/* Restore the old function */
@@ -2802,7 +2803,11 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 	/* if moderation is supported by device we set adaptive moderation */
 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
-	ena_com_enable_adaptive_moderation(ena_dev);
+
+	/* Disable adaptive moderation by default - can be enabled from
+	 * ethtool
+	 */
+	ena_com_disable_adaptive_moderation(ena_dev);

 	return 0;
 err:
@@ -697,8 +697,8 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
 	if (indir) {
 		for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
 			rc = ena_com_indirect_table_fill_entry(ena_dev,
-							       ENA_IO_RXQ_IDX(indir[i]),
-							       i);
+							       i,
+							       ENA_IO_RXQ_IDX(indir[i]));
 			if (unlikely(rc)) {
 				netif_err(adapter, drv, netdev,
 					  "Cannot fill indirect table (index is too large)\n");
@@ -224,28 +224,23 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	if (!tx_ring->tx_buffer_info) {
 		tx_ring->tx_buffer_info = vzalloc(size);
 		if (!tx_ring->tx_buffer_info)
-			return -ENOMEM;
+			goto err_tx_buffer_info;
 	}

 	size = sizeof(u16) * tx_ring->ring_size;
 	tx_ring->free_tx_ids = vzalloc_node(size, node);
 	if (!tx_ring->free_tx_ids) {
 		tx_ring->free_tx_ids = vzalloc(size);
-		if (!tx_ring->free_tx_ids) {
-			vfree(tx_ring->tx_buffer_info);
-			return -ENOMEM;
-		}
+		if (!tx_ring->free_tx_ids)
+			goto err_free_tx_ids;
 	}

 	size = tx_ring->tx_max_header_size;
 	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
 	if (!tx_ring->push_buf_intermediate_buf) {
 		tx_ring->push_buf_intermediate_buf = vzalloc(size);
-		if (!tx_ring->push_buf_intermediate_buf) {
-			vfree(tx_ring->tx_buffer_info);
-			vfree(tx_ring->free_tx_ids);
-			return -ENOMEM;
-		}
+		if (!tx_ring->push_buf_intermediate_buf)
+			goto err_push_buf_intermediate_buf;
 	}

 	/* Req id ring for TX out of order completions */
@@ -259,6 +254,15 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	tx_ring->next_to_clean = 0;
 	tx_ring->cpu = ena_irq->cpu;
 	return 0;
+
+err_push_buf_intermediate_buf:
+	vfree(tx_ring->free_tx_ids);
+	tx_ring->free_tx_ids = NULL;
+err_free_tx_ids:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+err_tx_buffer_info:
+	return -ENOMEM;
 }

 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
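The two hunks above replace per-branch cleanup with a single unwind ladder at the end of the function. A small self-contained C sketch of the same pattern (illustrative only; the struct, names and sizes are made up, not the driver's):

#include <stdlib.h>

struct ring {
	void *buffer_info;
	void *free_ids;
	void *push_buf;
};

static int ring_setup(struct ring *r, size_t entries)
{
	r->buffer_info = calloc(entries, 64);
	if (!r->buffer_info)
		goto err_buffer_info;

	r->free_ids = calloc(entries, sizeof(unsigned short));
	if (!r->free_ids)
		goto err_free_ids;

	r->push_buf = malloc(256);
	if (!r->push_buf)
		goto err_push_buf;

	return 0;

	/* Each label frees only what was allocated before the failure,
	 * so no error branch has to repeat the earlier free() calls. */
err_push_buf:
	free(r->free_ids);
	r->free_ids = NULL;
err_free_ids:
	free(r->buffer_info);
	r->buffer_info = NULL;
err_buffer_info:
	return -1;	/* the kernel code returns -ENOMEM here */
}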
@@ -378,6 +382,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 		rx_ring->free_rx_ids = vzalloc(size);
 		if (!rx_ring->free_rx_ids) {
 			vfree(rx_ring->rx_buffer_info);
+			rx_ring->rx_buffer_info = NULL;
 			return -ENOMEM;
 		}
 	}
@@ -1820,6 +1825,7 @@ err_setup_rx:
 err_setup_tx:
 	ena_free_io_irq(adapter);
 err_req_irq:
+	ena_del_napi(adapter);

 	return rc;
 }
@@ -2291,7 +2297,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
 	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
 	host_info->os_type = ENA_ADMIN_OS_LINUX;
 	host_info->kernel_ver = LINUX_VERSION_CODE;
-	strncpy(host_info->kernel_ver_str, utsname()->version,
+	strlcpy(host_info->kernel_ver_str, utsname()->version,
 		sizeof(host_info->kernel_ver_str) - 1);
 	host_info->os_dist = 0;
 	strncpy(host_info->os_dist_str, utsname()->release,
@@ -2427,12 +2427,12 @@ static int macb_open(struct net_device *dev)
 		goto pm_exit;
 	}

-	bp->macbgem_ops.mog_init_rings(bp);
-	macb_init_hw(bp);
-
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
 		napi_enable(&queue->napi);

+	bp->macbgem_ops.mog_init_rings(bp);
+	macb_init_hw(bp);
+
 	/* schedule a link state check */
 	phy_start(dev->phydev);

@@ -6160,15 +6160,24 @@ static int __init cxgb4_init_module(void)

 	ret = pci_register_driver(&cxgb4_driver);
 	if (ret < 0)
-		debugfs_remove(cxgb4_debugfs_root);
+		goto err_pci;

 #if IS_ENABLED(CONFIG_IPV6)
 	if (!inet6addr_registered) {
-		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
-		inet6addr_registered = true;
+		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+		if (ret)
+			pci_unregister_driver(&cxgb4_driver);
+		else
+			inet6addr_registered = true;
 	}
 #endif

+	if (ret == 0)
+		return ret;
+
+err_pci:
+	debugfs_remove(cxgb4_debugfs_root);
+
 	return ret;
 }

@@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);

 		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i < nr_frags; i++) {
+		for (i = 1; i <= nr_frags; i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

 			dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
@@ -252,14 +252,12 @@ uec_set_ringparam(struct net_device *netdev,
 		return -EINVAL;
 	}

+	if (netif_running(netdev))
+		return -EBUSY;
+
 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
 	ug_info->bdRingLenTx[queue] = ring->tx_pending;

-	if (netif_running(netdev)) {
-		/* FIXME: restart automatically */
-		netdev_info(netdev, "Please re-open the interface\n");
-	}
-
 	return ret;
 }

@@ -875,12 +875,6 @@ static inline int netvsc_send_pkt(
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		ndev_ctx->eth_stats.stop_queue++;
-		if (atomic_read(&nvchan->queue_sends) < 1 &&
-		    !net_device->tx_disable) {
-			netif_tx_wake_queue(txq);
-			ndev_ctx->eth_stats.wake_queue++;
-			ret = -ENOSPC;
-		}
 	} else {
 		netdev_err(ndev,
 			   "Unable to send packet pages %u len %u, ret %d\n",
@@ -888,6 +882,15 @@ static inline int netvsc_send_pkt(
 			   ret);
 	}

+	if (netif_tx_queue_stopped(txq) &&
+	    atomic_read(&nvchan->queue_sends) < 1 &&
+	    !net_device->tx_disable) {
+		netif_tx_wake_queue(txq);
+		ndev_ctx->eth_stats.wake_queue++;
+		if (ret == -EAGAIN)
+			ret = -ENOSPC;
+	}
+
 	return ret;
 }

@@ -2116,11 +2116,14 @@ bool phy_validate_pause(struct phy_device *phydev,
 			struct ethtool_pauseparam *pp)
 {
 	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-			       phydev->supported) ||
-	    (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-				phydev->supported) &&
-	     pp->rx_pause != pp->tx_pause))
+			       phydev->supported) && pp->rx_pause)
 		return false;
+
+	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			       phydev->supported) &&
+	    pp->rx_pause != pp->tx_pause)
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(phy_validate_pause);
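Restated as plain booleans, the rewritten check above enforces two independent rules. A standalone C sketch (illustrative only, not the phylib API):

#include <stdbool.h>

/* A pause request is acceptable only if:
 *  - symmetric Pause is supported whenever reception of pause frames is
 *    requested, and
 *  - Asym_Pause is supported whenever the rx and tx settings differ. */
static bool pause_request_ok(bool sym_pause_supported, bool asym_pause_supported,
			     bool rx_pause, bool tx_pause)
{
	if (!sym_pause_supported && rx_pause)
		return false;
	if (!asym_pause_supported && rx_pause != tx_pause)
		return false;
	return true;
}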
@@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
 	return val * hash_rnd[0];
 }

+#ifdef CONFIG_INET
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
 	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
@@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev

 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+	return NULL;
+}
+#endif

 static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
 {
@@ -94,7 +94,6 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
 		goto out;

 	head->dev = dev;
-	skb_get(head);
 	spin_unlock(&fq->q.lock);

 	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
@@ -31,6 +31,7 @@
 #include <linux/times.h>
 #include <net/net_namespace.h>
 #include <net/neighbour.h>
+#include <net/arp.h>
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/netevent.h>
@@ -663,6 +664,8 @@ out:
 out_tbl_unlock:
 	write_unlock_bh(&tbl->lock);
 out_neigh_release:
+	if (!exempt_from_gc)
+		atomic_dec(&tbl->gc_entries);
 	neigh_release(n);
 	goto out;
 }
@@ -2990,7 +2993,13 @@ int neigh_xmit(int index, struct net_device *dev,
 		if (!tbl)
 			goto out;
 		rcu_read_lock_bh();
-		neigh = __neigh_lookup_noref(tbl, addr, dev);
+		if (index == NEIGH_ARP_TABLE) {
+			u32 key = *((u32 *)addr);
+
+			neigh = __ipv4_neigh_lookup_noref(dev, key);
+		} else {
+			neigh = __neigh_lookup_noref(tbl, addr, dev);
+		}
 		if (!neigh)
 			neigh = __neigh_create(tbl, addr, dev, false);
 		err = PTR_ERR(neigh);
@@ -344,7 +344,7 @@ static int __init dsa_init_module(void)

 	rc = dsa_slave_register_notifier();
 	if (rc)
-		return rc;
+		goto register_notifier_fail;

 	dev_add_pack(&dsa_pack_type);

@@ -352,6 +352,11 @@ static int __init dsa_init_module(void)
 			      THIS_MODULE);

 	return 0;
+
+register_notifier_fail:
+	destroy_workqueue(dsa_owq);
+
+	return rc;
 }
 module_init(dsa_init_module);

@@ -335,8 +335,6 @@ next_entry2:
 	}
 	spin_unlock_bh(lock);
-	err = 0;
-	e = 0;

 out:
 	cb->args[1] = e;
 	return err;
@@ -374,6 +372,7 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
 		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
 		if (err < 0)
 			break;
+		cb->args[1] = 0;
 next_table:
 		t++;
 	}
@@ -1084,7 +1084,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 	if (!tdev && tunnel->parms.link)
 		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

-	if (tdev) {
+	if (tdev && !netif_is_l3_master(tdev)) {
 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);

 		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
@@ -1735,7 +1735,8 @@ static __net_exit void l2tp_exit_net(struct net *net)
 	}
 	rcu_read_unlock_bh();

-	flush_workqueue(l2tp_wq);
+	if (l2tp_wq)
+		flush_workqueue(l2tp_wq);
 	rcu_barrier();

 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
@@ -551,7 +551,7 @@ static __net_init int rds_tcp_init_net(struct net *net)
 		tbl = kmemdup(rds_tcp_sysctl_table,
 			      sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
 		if (!tbl) {
-			pr_warn("could not set allocate syctl table\n");
+			pr_warn("could not set allocate sysctl table\n");
 			return -ENOMEM;
 		}
 		rtn->ctl_table = tbl;
@@ -32,6 +32,8 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
 	u32 classid = task_get_classid(skb);

+	if (unlikely(!head))
+		return -1;
 	if (!classid)
 		return -1;
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
@@ -32,6 +32,9 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 {
 	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

+	if (unlikely(!head))
+		return -1;
+
 	if (tc_skip_sw(head->flags))
 		return -1;

@@ -86,3 +86,22 @@
 	.result = ACCEPT,
 	.retval = 2,
 },
+{
+	"jit: jsgt, jslt",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_1, 0x80000000ULL),
+	BPF_LD_IMM64(BPF_REG_2, 0x0ULL),
+	BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_2, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+
+	BPF_JMP_REG(BPF_JSLT, BPF_REG_2, BPF_REG_1, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 2,
+},
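The 0x80000000 constant is what lets this test catch the 32-bit JIT bug addressed above: as a full 64-bit signed value it is positive, but its low 32 bits on their own read as negative. A tiny standalone C illustration (not part of the selftest):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t v64 = 0x80000000LL;	/* positive as a 64-bit signed value */
	int32_t v32 = (int32_t)v64;	/* negative if only the low word is considered */

	printf("s64: %lld > 0 -> %d\n", (long long)v64, v64 > 0);	/* prints 1 */
	printf("s32: %d > 0 -> %d\n", v32, v32 > 0);			/* prints 0 */
	return 0;
}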