tun: do not put self in waitq if doing a nonblock read
Perf shows a relatively high rate (about 8%) of contention in spin_lock_irqsave() when running netperf between an external host and a guest. This is mainly because of lock contention between tun_do_read() and tun_xmit_skb(), so this patch avoids putting the reader on the wait queue when doing a nonblocking read, which reduces that contention. After this patch, the rate drops to 4%.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Amos Kong <akong@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6f7c156c08
commit 61a5ff15eb
@@ -817,7 +817,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
 
 	tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
-	add_wait_queue(&tun->wq.wait, &wait);
+	if (unlikely(!noblock))
+		add_wait_queue(&tun->wq.wait, &wait);
 	while (len) {
 		current->state = TASK_INTERRUPTIBLE;
 
@@ -848,7 +849,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
 	}
 
 	current->state = TASK_RUNNING;
-	remove_wait_queue(&tun->wq.wait, &wait);
+	if (unlikely(!noblock))
+		remove_wait_queue(&tun->wq.wait, &wait);
 
 	return ret;
 }
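The noblock branch above is taken when the tun file descriptor is in nonblocking mode (O_NONBLOCK); such a reader never sleeps, so registering on tun->wq.wait would only add wait-queue lock traffic that contends with the transmit path's wakeups. As a rough illustration of the userspace side that exercises this path, here is a minimal sketch; the device name, error handling, and required privileges are assumptions for the example, not part of the patch:

/* Minimal sketch: open a tun device nonblocking and poll it with read().
 * With O_NONBLOCK set on the fd, tun_do_read() takes the noblock path,
 * which (after this patch) no longer registers on the wait queue.
 * Assumes root privileges and tun support; the name "tun0" is arbitrary.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	char buf[2048];

	int fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* plain IP packets, no extra header */
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}

	/* A nonblocking read returns -1/EAGAIN immediately when no packet is
	 * queued, instead of sleeping on the tun wait queue. */
	ssize_t n = read(fd, buf, sizeof(buf));
	if (n < 0 && errno == EAGAIN)
		printf("no packet queued; read did not block\n");
	else if (n >= 0)
		printf("read %zd bytes\n", n);

	close(fd);
	return 0;
}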