/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "percpu_freelist.h"
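
/*
 * A simple per-cpu freelist: one singly linked list per possible cpu,
 * each protected by its own raw spinlock. Push and pop prefer the
 * current cpu's list; pop falls back to scanning the other possible
 * cpus when the local list is empty.
 */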

int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
					struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}
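
/*
 * Push onto the current cpu's list. Note that head->lock is taken
 * without disabling irqs here, whereas pcpu_freelist_populate() and
 * pcpu_freelist_pop() disable irqs around the lock (see the comments
 * there for the lockdep rationale).
 */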
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

	__pcpu_freelist_push(head, node);
}

void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	/* disable irqs to work around a lockdep false positive;
	 * in bpf usage, pcpu_freelist_populate() never races with
	 * pcpu_freelist_push()
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		__pcpu_freelist_push(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
	local_irq_restore(flags);
}

/* Pop from the current cpu's list first; if it is empty, scan the
 * remaining possible cpus round-robin and give up only after a full
 * pass. irqs stay disabled for the whole scan, for the same lockdep
 * reason as in pcpu_freelist_populate().
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	unsigned long flags;
	int orig_cpu, cpu;

	local_irq_save(flags);
	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock_irqrestore(&head->lock, flags);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu) {
			local_irq_restore(flags);
			return NULL;
		}
	}
}