mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-28 23:23:55 +08:00
89c4b442b7
Callers of netpoll_poll_lock() own NAPI_STATE_SCHED
Callers of netpoll_poll_unlock() keep BH disabled between
NAPI_STATE_SCHED being cleared and the poll lock being released.
We can avoid the spinlock which has no contention, and use cmpxchg()
on poll_owner which we need to set anyway.
This removes a possible lockdep violation after the cited commit,
since sk_busy_loop() re-enables BH before calling busy_poll_stop()
Fixes: 217f697436
("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
122 lines · 2.6 KiB · C
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
#ifndef _LINUX_NETPOLL_H
|
|
#define _LINUX_NETPOLL_H
|
|
|
|
#include <linux/netdevice.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/rcupdate.h>
|
|
#include <linux/list.h>
|
|
|
|
/*
 * Address storage big enough for either an IPv4 or an IPv6 address,
 * providing both raw-word and struct views of the same bytes.
 */
union inet_addr {
	__u32		all[4];	/* raw 128-bit view */
	__be32		ip;	/* IPv4 address, network byte order */
	__be32		ip6[4];	/* IPv6 address, network byte order */
	struct in_addr	in;	/* IPv4 struct view */
	struct in6_addr	in6;	/* IPv6 struct view */
};
|
|
|
|
/*
 * Per-client netpoll configuration: the local device plus the UDP
 * endpoint (addresses, ports, remote MAC) used for emergency transmit.
 */
struct netpoll {
	struct net_device *dev;		/* device used to send/receive */
	char dev_name[IFNAMSIZ];	/* device name (used to find @dev) */
	const char *name;		/* client name, e.g. "netconsole" -- TODO confirm against callers */

	union inet_addr local_ip, remote_ip;	/* endpoint addresses */
	bool ipv6;			/* true: the addresses above are IPv6 */
	u16 local_port, remote_port;	/* UDP ports */
	u8 remote_mac[ETH_ALEN];	/* Ethernet address of the remote end */

	/* NOTE(review): presumably used by __netpoll_free_async() to defer
	 * teardown out of atomic context -- verify in net/core/netpoll.c */
	struct work_struct cleanup_work;
};
|
|
|
|
/*
 * Per-device netpoll state, attached to a struct net_device and freed
 * via RCU (see @rcu).
 */
struct netpoll_info {
	atomic_t refcnt;	/* number of netpoll clients on this device */

	/* NOTE(review): likely taken by netpoll_poll_disable()/enable() to
	 * block polling across device reconfiguration -- confirm in netpoll.c */
	struct semaphore dev_lock;

	struct sk_buff_head txq;	/* skbs queued when immediate xmit failed? verify */

	struct delayed_work tx_work;	/* retries transmit of @txq later */

	struct netpoll *netpoll;	/* back-pointer to the owning client */
	struct rcu_head rcu;		/* for RCU-deferred free */
};
|
|
|
|
#ifdef CONFIG_NETPOLL
/* Block/unblock netpoll's polling of @dev while it is reconfigured. */
extern void netpoll_poll_disable(struct net_device *dev);
extern void netpoll_poll_enable(struct net_device *dev);
#else
/* Without CONFIG_NETPOLL there is no polling to block: no-op stubs.
 * (Empty bodies: a bare "return;" at the end of a void function is
 * redundant, and the other stubs in this header use empty bodies.) */
static inline void netpoll_poll_disable(struct net_device *dev) { }
static inline void netpoll_poll_enable(struct net_device *dev) { }
#endif
|
|
|
|
/* Transmit a UDP packet carrying @msg/@len via the configured netpoll. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed netpoll options (addresses, ports, device, MAC). */
void netpoll_print_options(struct netpoll *np);
/* Parse the "port@addr/dev,port@addr/mac" option string into @np.
 * Returns 0 on success, negative errno otherwise -- TODO confirm. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to @ndev; caller variant that skips device lookup. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
/* Full setup: resolve np->dev_name and attach. */
int netpoll_setup(struct netpoll *np);
/* Teardown counterparts of the setup functions above. */
void __netpoll_cleanup(struct netpoll *np);
/* Asynchronous teardown -- presumably defers work via np->cleanup_work;
 * verify in net/core/netpoll.c. */
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Low-level transmit of @skb on an explicit @dev (IRQs must be off;
 * see netpoll_send_skb() below). */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
|
|
/*
 * Send @skb on np->dev with hard IRQs disabled for the duration of the
 * transmit, as netpoll_send_skb_on_dev() requires.
 */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;

	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}
|
|
|
|
#ifdef CONFIG_NETPOLL
/*
 * Claim poll ownership of @napi for this CPU if the device is under
 * netpoll (dev->npinfo set).  Ownership is taken by flipping
 * napi->poll_owner from -1 (unowned) to this CPU id with cmpxchg();
 * no spinlock is needed because callers already hold NAPI_STATE_SCHED
 * (see the commit message for 89c4b442b7: "Callers of
 * netpoll_poll_lock() own NAPI_STATE_SCHED").
 *
 * Returns an opaque cookie for netpoll_poll_unlock(): the napi struct
 * when ownership was taken, NULL when there was nothing to lock.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		int owner = smp_processor_id();

		/* Spin until the previous owner writes -1 back. */
		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
			cpu_relax();

		return napi;
	}
	return NULL;
}
|
|
|
|
/*
 * Release poll ownership taken by netpoll_poll_lock().  @have is the
 * cookie it returned (NULL means nothing was locked, so nothing to do).
 * smp_store_release() orders all work done while owning the poll before
 * the -1 store that lets another CPU's cmpxchg() succeed.
 */
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi)
		smp_store_release(&napi->poll_owner, -1);
}
|
|
|
|
/*
 * Heuristic for "is a netpoll transmit in progress on this CPU?":
 * netpoll sends with hard IRQs disabled (see netpoll_send_skb() above),
 * so disabled IRQs are used as the indicator.  NOTE(review): this is
 * necessarily approximate -- IRQs can be off for other reasons.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
|
|
|
|
#else
/*
 * !CONFIG_NETPOLL stubs: netpoll never runs, so locking is a no-op
 * (NULL cookie) and no transmit can be in progress.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
/* NOTE(review): no CONFIG_NETPOLL counterpart of this init stub is
 * visible in this header -- presumably declared/defined elsewhere. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif
|
|
|
|
#endif
|