2019-06-04 16:11:15 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2016-07-28 22:36:32 +08:00
|
|
|
/*
|
|
|
|
* common code for virtio vsock
|
|
|
|
*
|
|
|
|
* Copyright (C) 2013-2015 Red Hat, Inc.
|
|
|
|
* Author: Asias He <asias@redhat.com>
|
|
|
|
* Stefan Hajnoczi <stefanha@redhat.com>
|
|
|
|
*/
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/module.h>
|
2017-02-03 02:15:33 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2016-07-28 22:36:32 +08:00
|
|
|
#include <linux/ctype.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/virtio_vsock.h>
|
2017-04-21 17:10:46 +08:00
|
|
|
#include <uapi/linux/vsockmon.h>
|
2016-07-28 22:36:32 +08:00
|
|
|
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/af_vsock.h>
|
|
|
|
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
|
|
#include <trace/events/vsock_virtio_transport_common.h>
|
|
|
|
|
|
|
|
/* How long to wait for graceful shutdown of a connection */
|
|
|
|
#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
|
|
|
|
|
2019-07-30 23:43:30 +08:00
|
|
|
/* Threshold for detecting small packets to copy */
|
|
|
|
#define GOOD_COPY_LEN 128
|
|
|
|
|
2019-11-14 17:57:41 +08:00
|
|
|
static const struct virtio_transport *
|
|
|
|
virtio_transport_get_ops(struct vsock_sock *vsk)
|
2016-07-28 22:36:32 +08:00
|
|
|
{
|
2019-11-14 17:57:41 +08:00
|
|
|
const struct vsock_transport *t = vsock_core_get_transport(vsk);
|
2016-07-28 22:36:32 +08:00
|
|
|
|
2019-12-14 02:48:01 +08:00
|
|
|
if (WARN_ON(!t))
|
|
|
|
return NULL;
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
return container_of(t, struct virtio_transport, transport);
|
|
|
|
}
|
|
|
|
|
2016-12-06 12:06:06 +08:00
|
|
|
/* Allocate and fill a packet for transmission.
 *
 * The header fields are stored little-endian per the virtio-vsock spec.
 * For data-carrying packets (info->msg set and len > 0) the payload is
 * copied out of the user's msghdr.  Returns NULL on allocation or copy
 * failure; the caller owns the returned packet.
 */
static struct virtio_vsock_pkt *
virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
			   size_t len,
			   u32 src_cid,
			   u32 src_port,
			   u32 dst_cid,
			   u32 dst_port)
{
	struct virtio_vsock_pkt *pkt;
	int err;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	pkt->hdr.type = cpu_to_le16(info->type);
	pkt->hdr.op = cpu_to_le16(info->op);
	pkt->hdr.src_cid = cpu_to_le64(src_cid);
	pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
	pkt->hdr.src_port = cpu_to_le32(src_port);
	pkt->hdr.dst_port = cpu_to_le32(dst_port);
	pkt->hdr.flags = cpu_to_le32(info->flags);
	pkt->len = len;
	pkt->hdr.len = cpu_to_le32(len);
	pkt->reply = info->reply;
	pkt->vsk = info->vsk;

	if (info->msg && len > 0) {
		pkt->buf = kmalloc(len, GFP_KERNEL);
		if (!pkt->buf)
			goto out_pkt;

		pkt->buf_len = len;

		err = memcpy_from_msg(pkt->buf, info->msg, len);
		if (err)
			goto out;

		/* If the whole user message fit in this packet, mark the
		 * end-of-record boundary for SEQPACKET sockets.
		 */
		if (msg_data_left(info->msg) == 0 &&
		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET)
			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
	}

	trace_virtio_transport_alloc_pkt(src_cid, src_port,
					 dst_cid, dst_port,
					 len,
					 info->type,
					 info->op,
					 info->flags);

	return pkt;

out:
	kfree(pkt->buf);
out_pkt:
	kfree(pkt);
	return NULL;
}
|
|
|
|
|
2017-04-21 17:10:46 +08:00
|
|
|
/* Packet capture: build an sk_buff describing @opaque (a
 * struct virtio_vsock_pkt) in the vsockmon format, for delivery to
 * tap devices.  Returns NULL on allocation failure.
 */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
	struct virtio_vsock_pkt *pkt = opaque;
	struct af_vsockmon_hdr *hdr;
	struct sk_buff *skb;
	size_t payload_len;
	void *payload_buf;

	/* A packet could be split to fit the RX buffer, so we can retrieve
	 * the payload length from the header and the buffer pointer taking
	 * care of the offset in the original packet.
	 */
	payload_len = le32_to_cpu(pkt->hdr.len);
	payload_buf = pkt->buf + pkt->off;

	skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
			GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, sizeof(*hdr));

	/* pkt->hdr is little-endian so no need to byteswap here */
	hdr->src_cid = pkt->hdr.src_cid;
	hdr->src_port = pkt->hdr.src_port;
	hdr->dst_cid = pkt->hdr.dst_cid;
	hdr->dst_port = pkt->hdr.dst_port;

	hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
	hdr->len = cpu_to_le16(sizeof(pkt->hdr));
	memset(hdr->reserved, 0, sizeof(hdr->reserved));

	/* Map the virtio-vsock operation onto the generic vsockmon op
	 * reported to capture tools.
	 */
	switch (le16_to_cpu(pkt->hdr.op)) {
	case VIRTIO_VSOCK_OP_REQUEST:
	case VIRTIO_VSOCK_OP_RESPONSE:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
		break;
	case VIRTIO_VSOCK_OP_RST:
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
		break;
	case VIRTIO_VSOCK_OP_RW:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
		break;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
		break;
	default:
		hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
		break;
	}

	skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));

	if (payload_len) {
		skb_put_data(skb, payload_buf, payload_len);
	}

	return skb;
}
|
|
|
|
|
|
|
|
void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
2020-04-24 23:08:30 +08:00
|
|
|
if (pkt->tap_delivered)
|
|
|
|
return;
|
|
|
|
|
2017-04-21 17:10:46 +08:00
|
|
|
vsock_deliver_tap(virtio_transport_build_skb, pkt);
|
2020-04-24 23:08:30 +08:00
|
|
|
pkt->tap_delivered = true;
|
2017-04-21 17:10:46 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
|
|
|
|
|
2021-06-11 19:12:53 +08:00
|
|
|
static u16 virtio_transport_get_type(struct sock *sk)
|
|
|
|
{
|
|
|
|
if (sk->sk_type == SOCK_STREAM)
|
|
|
|
return VIRTIO_VSOCK_TYPE_STREAM;
|
|
|
|
else
|
|
|
|
return VIRTIO_VSOCK_TYPE_SEQPACKET;
|
|
|
|
}
|
|
|
|
|
2019-12-14 02:48:01 +08:00
|
|
|
/* This function can only be used on connecting/connected sockets,
 * since a socket assigned to a transport is required.
 *
 * Do not use on listener sockets!
 *
 * Reserves credit, builds a packet from @info and hands it to the
 * transport.  May send fewer than info->pkt_len bytes; returns the
 * transport's send result, the (possibly reduced) length for a dropped
 * zero-length OP_RW, or a negative errno.
 */
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
					  struct virtio_vsock_pkt_info *info)
{
	u32 src_cid, src_port, dst_cid, dst_port;
	const struct virtio_transport *t_ops;
	struct virtio_vsock_sock *vvs;
	struct virtio_vsock_pkt *pkt;
	u32 pkt_len = info->pkt_len;

	info->type = virtio_transport_get_type(sk_vsock(vsk));

	t_ops = virtio_transport_get_ops(vsk);
	if (unlikely(!t_ops))
		return -EFAULT;

	src_cid = t_ops->transport.get_local_cid();
	src_port = vsk->local_addr.svm_port;
	/* remote_cid == 0 means "use the socket's connected peer". */
	if (!info->remote_cid) {
		dst_cid = vsk->remote_addr.svm_cid;
		dst_port = vsk->remote_addr.svm_port;
	} else {
		dst_cid = info->remote_cid;
		dst_port = info->remote_port;
	}

	vvs = vsk->trans;

	/* we can send less than pkt_len bytes */
	if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

	/* virtio_transport_get_credit might return less than pkt_len credit */
	pkt_len = virtio_transport_get_credit(vvs, pkt_len);

	/* Do not send zero length OP_RW pkt */
	if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
		return pkt_len;

	pkt = virtio_transport_alloc_pkt(info, pkt_len,
					 src_cid, src_port,
					 dst_cid, dst_port);
	if (!pkt) {
		/* Roll back the credit reserved above. */
		virtio_transport_put_credit(vvs, pkt_len);
		return -ENOMEM;
	}

	virtio_transport_inc_tx_pkt(vvs, pkt);

	return t_ops->send_pkt(pkt);
}
|
|
|
|
|
2019-10-17 20:44:03 +08:00
|
|
|
static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
|
2016-07-28 22:36:32 +08:00
|
|
|
struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
2019-10-17 20:44:03 +08:00
|
|
|
if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
|
|
|
|
return false;
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
vvs->rx_bytes += pkt->len;
|
2019-10-17 20:44:03 +08:00
|
|
|
return true;
|
2016-07-28 22:36:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
|
|
|
|
struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
|
|
|
vvs->rx_bytes -= pkt->len;
|
|
|
|
vvs->fwd_cnt += pkt->len;
|
|
|
|
}
|
|
|
|
|
|
|
|
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
2019-07-30 23:43:32 +08:00
|
|
|
spin_lock_bh(&vvs->rx_lock);
|
2019-07-30 23:43:31 +08:00
|
|
|
vvs->last_fwd_cnt = vvs->fwd_cnt;
|
2016-07-28 22:36:32 +08:00
|
|
|
pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
|
|
|
|
pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
|
2019-07-30 23:43:32 +08:00
|
|
|
spin_unlock_bh(&vvs->rx_lock);
|
2016-07-28 22:36:32 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
|
|
|
|
|
|
|
|
/* Reserve up to @credit bytes of transmit credit.  Returns the amount
 * actually reserved, bounded by the peer's free receive space; the
 * caller must either send that many bytes or give the excess back via
 * virtio_transport_put_credit().
 */
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	u32 available;

	spin_lock_bh(&vvs->tx_lock);
	available = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	if (available > credit)
		available = credit;
	vvs->tx_cnt += available;
	spin_unlock_bh(&vvs->tx_lock);

	return available;
}
EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
|
|
|
|
|
|
|
|
/* Return credit that was reserved with virtio_transport_get_credit()
 * but not consumed by a sent packet.
 */
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
{
	spin_lock_bh(&vvs->tx_lock);
	vvs->tx_cnt -= credit;
	spin_unlock_bh(&vvs->tx_lock);
}
EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
|
|
|
|
|
2021-06-11 19:12:08 +08:00
|
|
|
/* Send a CREDIT_UPDATE control packet advertising our current
 * receive-buffer state to the peer.
 */
static int virtio_transport_send_credit_update(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
|
|
|
|
|
2019-10-01 02:25:23 +08:00
|
|
|
/* MSG_PEEK path: copy up to @len bytes into @msg without consuming
 * data.  Packets stay queued and offsets are tracked in locals, so the
 * rx queue is left untouched.  Returns bytes copied, or a negative
 * errno when nothing was copied.
 */
static ssize_t
virtio_transport_stream_do_peek(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct virtio_vsock_pkt *pkt;
	size_t bytes, total = 0, off;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);

	list_for_each_entry(pkt, &vvs->rx_queue, list) {
		off = pkt->off;

		if (total == len)
			break;

		while (total < len && off < pkt->len) {
			bytes = len - total;
			if (bytes > pkt->len - off)
				bytes = pkt->len - off;

			/* sk_lock is held by caller so no one else can dequeue.
			 * Unlock rx_lock since memcpy_to_msg() may sleep.
			 */
			spin_unlock_bh(&vvs->rx_lock);

			err = memcpy_to_msg(msg, pkt->buf + off, bytes);
			if (err)
				goto out;

			spin_lock_bh(&vvs->rx_lock);

			total += bytes;
			off += bytes;
		}
	}

	spin_unlock_bh(&vvs->rx_lock);

	return total;

out:
	/* A partial copy still reports progress to the caller. */
	if (total)
		err = total;
	return err;
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* Consume up to @len bytes from the rx queue into @msg.  Fully-drained
 * packets are freed and accounted as forwarded credit; a credit update
 * is sent when the peer's view of our free space runs low.  Returns
 * bytes copied, or a negative errno when nothing was copied.
 */
static ssize_t
virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct virtio_vsock_pkt *pkt;
	size_t bytes, total = 0;
	u32 free_space;
	int err = -EFAULT;

	spin_lock_bh(&vvs->rx_lock);
	while (total < len && !list_empty(&vvs->rx_queue)) {
		pkt = list_first_entry(&vvs->rx_queue,
				       struct virtio_vsock_pkt, list);

		bytes = len - total;
		if (bytes > pkt->len - pkt->off)
			bytes = pkt->len - pkt->off;

		/* sk_lock is held by caller so no one else can dequeue.
		 * Unlock rx_lock since memcpy_to_msg() may sleep.
		 */
		spin_unlock_bh(&vvs->rx_lock);

		err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
		if (err)
			goto out;

		spin_lock_bh(&vvs->rx_lock);

		total += bytes;
		pkt->off += bytes;
		if (pkt->off == pkt->len) {
			virtio_transport_dec_rx_pkt(vvs, pkt);
			list_del(&pkt->list);
			virtio_transport_free_pkt(pkt);
		}
	}

	free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);

	spin_unlock_bh(&vvs->rx_lock);

	/* To reduce the number of credit update messages,
	 * don't update credits as long as lots of space is available.
	 * Note: the limit chosen here is arbitrary. Setting the limit
	 * too high causes extra messages. Too low causes transmitter
	 * stalls. As stalls are in theory more expensive than extra
	 * messages, we set the limit to a high value. TODO: experiment
	 * with different values.
	 */
	if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
		virtio_transport_send_credit_update(vsk);

	return total;

out:
	if (total)
		err = total;
	return err;
}
|
|
|
|
|
2021-06-11 19:12:38 +08:00
|
|
|
/* Dequeue one complete SEQPACKET message (all fragments up to the
 * VIRTIO_VSOCK_SEQ_EOR marker).  Even after a copy error, remaining
 * fragments of the message are drained and freed so the record stream
 * stays in sync.  Returns the full message length on success (which
 * may exceed the bytes actually copied if the user buffer was short),
 * 0 if no complete message is queued, or a negative errno.
 */
static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
						 struct msghdr *msg,
						 int flags)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct virtio_vsock_pkt *pkt;
	int dequeued_len = 0;
	size_t user_buf_len = msg_data_left(msg);
	bool msg_ready = false;

	spin_lock_bh(&vvs->rx_lock);

	/* msg_count counts complete (EOR-terminated) messages. */
	if (vvs->msg_count == 0) {
		spin_unlock_bh(&vvs->rx_lock);
		return 0;
	}

	while (!msg_ready) {
		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);

		/* Once dequeued_len goes negative (copy error), fragments
		 * are only drained, not copied.
		 */
		if (dequeued_len >= 0) {
			size_t pkt_len;
			size_t bytes_to_copy;

			pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
			bytes_to_copy = min(user_buf_len, pkt_len);

			if (bytes_to_copy) {
				int err;

				/* sk_lock is held by caller so no one else can dequeue.
				 * Unlock rx_lock since memcpy_to_msg() may sleep.
				 */
				spin_unlock_bh(&vvs->rx_lock);

				err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
				if (err) {
					/* Copy of message failed. Rest of
					 * fragments will be freed without copy.
					 */
					dequeued_len = err;
				} else {
					user_buf_len -= bytes_to_copy;
				}

				spin_lock_bh(&vvs->rx_lock);
			}

			if (dequeued_len >= 0)
				dequeued_len += pkt_len;
		}

		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
			msg_ready = true;
			vvs->msg_count--;
		}

		virtio_transport_dec_rx_pkt(vvs, pkt);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	spin_unlock_bh(&vvs->rx_lock);

	virtio_transport_send_credit_update(vsk);

	return dequeued_len;
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
ssize_t
|
|
|
|
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
|
|
|
|
struct msghdr *msg,
|
|
|
|
size_t len, int flags)
|
|
|
|
{
|
|
|
|
if (flags & MSG_PEEK)
|
2019-10-01 02:25:23 +08:00
|
|
|
return virtio_transport_stream_do_peek(vsk, msg, len);
|
|
|
|
else
|
|
|
|
return virtio_transport_stream_do_dequeue(vsk, msg, len);
|
2016-07-28 22:36:32 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
|
|
|
|
|
2021-06-11 19:12:38 +08:00
|
|
|
ssize_t
|
|
|
|
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
|
|
|
|
struct msghdr *msg,
|
|
|
|
int flags)
|
|
|
|
{
|
|
|
|
if (flags & MSG_PEEK)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
|
|
|
|
|
2021-06-11 19:13:06 +08:00
|
|
|
int
|
|
|
|
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
|
|
|
|
struct msghdr *msg,
|
|
|
|
size_t len)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
|
|
|
|
spin_lock_bh(&vvs->tx_lock);
|
|
|
|
|
|
|
|
if (len > vvs->peer_buf_alloc) {
|
|
|
|
spin_unlock_bh(&vvs->tx_lock);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&vvs->tx_lock);
|
|
|
|
|
|
|
|
return virtio_transport_stream_enqueue(vsk, msg, len);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* Datagram sockets are not supported by the virtio transport. */
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
|
|
|
|
|
|
|
|
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
s64 bytes;
|
|
|
|
|
|
|
|
spin_lock_bh(&vvs->rx_lock);
|
|
|
|
bytes = vvs->rx_bytes;
|
|
|
|
spin_unlock_bh(&vvs->rx_lock);
|
|
|
|
|
|
|
|
return bytes;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
|
|
|
|
|
2021-06-11 19:13:06 +08:00
|
|
|
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
u32 msg_count;
|
|
|
|
|
|
|
|
spin_lock_bh(&vvs->rx_lock);
|
|
|
|
msg_count = vvs->msg_count;
|
|
|
|
spin_unlock_bh(&vvs->rx_lock);
|
|
|
|
|
|
|
|
return msg_count;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
static s64 virtio_transport_has_space(struct vsock_sock *vsk)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
s64 bytes;
|
|
|
|
|
|
|
|
bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
|
|
|
|
if (bytes < 0)
|
|
|
|
bytes = 0;
|
|
|
|
|
|
|
|
return bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
s64 bytes;
|
|
|
|
|
|
|
|
spin_lock_bh(&vvs->tx_lock);
|
|
|
|
bytes = virtio_transport_has_space(vsk);
|
|
|
|
spin_unlock_bh(&vvs->tx_lock);
|
|
|
|
|
|
|
|
return bytes;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
|
|
|
|
|
|
|
|
/* Allocate and initialize the per-socket transport state (vsk->trans).
 * @psk is the listening parent for accepted sockets, or NULL; children
 * inherit the peer's advertised buffer size from the parent.
 */
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk)
{
	struct virtio_vsock_sock *vvs;

	vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
	if (!vvs)
		return -ENOMEM;

	vsk->trans = vvs;
	vvs->vsk = vsk;
	if (psk && psk->trans) {
		struct virtio_vsock_sock *ptrans = psk->trans;

		vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
	}

	/* Clamp the requested buffer size to what the protocol supports. */
	if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE)
		vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = vsk->buffer_size;

	spin_lock_init(&vvs->rx_lock);
	spin_lock_init(&vvs->tx_lock);
	INIT_LIST_HEAD(&vvs->rx_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
|
|
|
|
|
2019-11-14 17:57:42 +08:00
|
|
|
/* sk_lock held by the caller.
 *
 * Apply a new receive buffer size: clamp it to the protocol maximum
 * (*val is updated in place so the core layer sees the effective
 * value) and advertise the change to the peer via a credit update.
 */
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
{
	struct virtio_vsock_sock *vvs = vsk->trans;

	if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
		*val = VIRTIO_VSOCK_MAX_BUF_SIZE;

	vvs->buf_alloc = *val;

	virtio_transport_send_credit_update(vsk);
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
|
2016-07-28 22:36:32 +08:00
|
|
|
|
|
|
|
int
|
|
|
|
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
|
|
|
|
size_t target,
|
|
|
|
bool *data_ready_now)
|
|
|
|
{
|
|
|
|
if (vsock_stream_has_data(vsk))
|
|
|
|
*data_ready_now = true;
|
|
|
|
else
|
|
|
|
*data_ready_now = false;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
|
|
|
|
|
|
|
|
/* Poll helper: report whether transmit space is available.
 * NOTE(review): a negative free_space deliberately leaves
 * *space_avail_now untouched — confirm callers pre-initialize it.
 */
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_avail_now)
{
	s64 free_space;

	free_space = vsock_stream_has_space(vsk);
	if (free_space > 0)
		*space_avail_now = true;
	else if (free_space == 0)
		*space_avail_now = false;

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
|
|
|
|
|
|
|
|
/* No-op: the virtio transport keeps no receive-notification state. */
int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
|
|
|
|
|
|
|
|
/* No-op receive-blocking hook. */
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
|
|
|
|
|
|
|
|
/* No-op pre-dequeue hook. */
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
|
|
|
|
|
|
|
|
/* No-op post-dequeue hook. */
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
|
|
|
|
|
|
|
|
/* No-op: the virtio transport keeps no send-notification state. */
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
|
|
|
|
|
|
|
|
/* No-op send-blocking hook. */
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
|
|
|
|
|
|
|
|
/* No-op pre-enqueue hook. */
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
|
|
|
|
|
|
|
|
/* No-op post-enqueue hook. */
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data)
{
	return 0;
}
EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
|
|
|
|
|
|
|
|
/* Receive high-water mark: the configured buffer size. */
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vsk->buffer_size;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
|
|
|
|
|
|
|
|
/* Virtio stream sockets are always considered active. */
bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
|
|
|
|
|
|
|
|
/* No CID/port filtering: stream connections are always allowed. */
bool virtio_transport_stream_allow(u32 cid, u32 port)
{
	return true;
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
|
|
|
|
|
|
|
|
/* Datagram sockets are not supported by the virtio transport. */
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
|
|
|
|
|
|
|
|
/* Datagram sockets are not supported by the virtio transport. */
bool virtio_transport_dgram_allow(u32 cid, u32 port)
{
	return false;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
|
|
|
|
|
|
|
|
/* Initiate a connection by sending a REQUEST packet to the peer. */
int virtio_transport_connect(struct vsock_sock *vsk)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_REQUEST,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_connect);
|
|
|
|
|
|
|
|
/* Send a SHUTDOWN packet, translating the socket-layer shutdown @mode
 * into the virtio-vsock RCV/SEND shutdown flags.
 */
int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_SHUTDOWN,
		.flags = (mode & RCV_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
			 (mode & SEND_SHUTDOWN ?
			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
|
|
|
|
|
|
|
|
/* Datagram sockets are not supported by the virtio transport. */
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t dgram_len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
|
|
|
|
|
|
|
|
/* Send up to @len bytes of @msg as an OP_RW data packet.  Returns the
 * number of bytes sent (possibly fewer than @len, limited by credit)
 * or a negative errno.
 */
ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RW,
		.msg = msg,
		.pkt_len = len,
		.vsk = vsk,
	};

	return virtio_transport_send_pkt_info(vsk, &info);
}
EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
|
|
|
|
|
|
|
|
void virtio_transport_destruct(struct vsock_sock *vsk)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_sock *vvs = vsk->trans;
|
|
|
|
|
|
|
|
kfree(vvs);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_destruct);
|
|
|
|
|
|
|
|
/* Send a RST on behalf of @vsk.  @pkt is the packet that triggered the
 * reset, or NULL when the reset is locally initiated; no RST is ever
 * sent in reply to a RST.
 */
static int virtio_transport_reset(struct vsock_sock *vsk,
				  struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.reply = !!pkt,
		.vsk = vsk,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	return virtio_transport_send_pkt_info(vsk, &info);
}
|
|
|
|
|
|
|
|
/* Normally packets are associated with a socket. There may be no socket if an
 * attempt was made to connect to a socket that does not exist.
 *
 * Builds a RST reply with source/destination swapped from @pkt and
 * sends it through transport @t.  Never replies to a RST.
 */
static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
					  struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock_pkt *reply;
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(pkt->hdr.type),
		.reply = true,
	};

	/* Send RST only if the original pkt is not a RST pkt */
	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	/* Swap src and dst: the reply goes back to the sender. */
	reply = virtio_transport_alloc_pkt(&info, 0,
					   le64_to_cpu(pkt->hdr.dst_cid),
					   le32_to_cpu(pkt->hdr.dst_port),
					   le64_to_cpu(pkt->hdr.src_cid),
					   le32_to_cpu(pkt->hdr.src_port));
	if (!reply)
		return -ENOMEM;

	if (!t) {
		virtio_transport_free_pkt(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}
|
|
|
|
|
2021-04-20 19:07:27 +08:00
|
|
|
/* This function should be called with sk_lock held and SOCK_DONE set.
 *
 * Drops any still-queued rx packets and unlinks the socket from the
 * vsock tables.
 */
static void virtio_transport_remove_sock(struct vsock_sock *vsk)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	struct virtio_vsock_pkt *pkt, *tmp;

	/* We don't need to take rx_lock, as the socket is closing and we are
	 * removing it.
	 */
	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	vsock_remove_sock(vsk);
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* Block until the peer acknowledges the close (SOCK_DONE is set), a
 * signal arrives, or @timeout expires.  A zero timeout returns
 * immediately.
 */
static void virtio_transport_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(sk_sleep(sk), &wait);

		do {
			if (sk_wait_event(sk, &timeout,
					  sock_flag(sk, SOCK_DONE), &wait))
				break;
		} while (!signal_pending(current) && timeout);

		remove_wait_queue(sk_sleep(sk), &wait);
	}
}
|
|
|
|
|
|
|
|
/* Finalize a close: mark the socket done, record full peer shutdown
 * and wake up waiters.  If a close-timeout work item is pending,
 * complete the teardown now (optionally cancelling the delayed work
 * when @cancel_timeout is set).
 */
static void virtio_transport_do_close(struct vsock_sock *vsk,
				      bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	/* Only move to TCP_CLOSING if there is no unread data left. */
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);

	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;

		virtio_transport_remove_sock(vsk);

		/* Release refcnt obtained when we scheduled the timeout */
		sock_put(sk);
	}
}
|
|
|
|
|
|
|
|
/* Delayed-work handler armed by virtio_transport_close(): if the peer has
 * not completed the shutdown handshake within VSOCK_CLOSE_TIMEOUT, reset
 * the connection and force-close the socket.
 */
static void virtio_transport_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);

	/* SOCK_DONE means the close already completed via another path. */
	if (!sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset(vsk, NULL);

		/* false: we are the timeout, nothing to cancel. */
		virtio_transport_do_close(vsk, false);
	}

	vsk->close_work_scheduled = false;

	release_sock(sk);
	sock_put(sk);
}
|
|
|
|
|
|
|
|
/* User context, vsk->sk is locked */
/* Initiate a graceful close.  Returns true when the socket can be removed
 * immediately, false when a close timeout was scheduled and teardown is
 * deferred to virtio_transport_close_timeout().
 */
static bool virtio_transport_close(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;

	/* Nothing to negotiate unless a connection is (half) open. */
	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	/* Already received SHUTDOWN from peer, reply with RST */
	if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
		(void)virtio_transport_reset(vsk, NULL);
		return true;
	}

	/* Tell the peer we are shutting down both directions. */
	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		(void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);

	/* SO_LINGER: wait (up to sk_lingertime) for the peer's answer,
	 * except when the process is exiting.
	 */
	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		virtio_transport_wait_close(sk, sk->sk_lingertime);

	if (sock_flag(sk, SOCK_DONE)) {
		return true;
	}

	/* Peer has not answered yet: hold a ref for the delayed work and
	 * arm the forced-close timeout.  The ref is dropped by whoever
	 * completes the close (worker or packet path).
	 */
	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work,
			  virtio_transport_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
	return false;
}
|
|
|
|
|
|
|
|
/* Transport release callback, invoked when the socket is being destroyed.
 * Connection-oriented sockets first run the graceful-close state machine;
 * the socket is removed immediately only if that completes synchronously.
 */
void virtio_transport_release(struct vsock_sock *vsk)
{
	struct sock *sk = &vsk->sk;
	bool remove_sock = true;

	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
		remove_sock = virtio_transport_close(vsk);

	if (remove_sock) {
		/* virtio_transport_remove_sock() requires SOCK_DONE. */
		sock_set_flag(sk, SOCK_DONE);
		virtio_transport_remove_sock(vsk);
	}
}
EXPORT_SYMBOL_GPL(virtio_transport_release);
|
|
|
|
|
|
|
|
/* Handle a packet received while the socket is in TCP_SYN_SENT (we sent a
 * connection REQUEST and are waiting for the peer's answer).
 * Returns 0 on success or a negative errno; on protocol errors the
 * connection is reset and the error is also reported on the socket.
 */
static int
virtio_transport_recv_connecting(struct sock *sk,
				 struct virtio_vsock_pkt *pkt)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int err;
	int skerr;

	switch (le16_to_cpu(pkt->hdr.op)) {
	case VIRTIO_VSOCK_OP_RESPONSE:
		/* Peer accepted: the connection is now established. */
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_INVALID:
		break;
	case VIRTIO_VSOCK_OP_RST:
		/* Peer refused the connection. */
		skerr = ECONNRESET;
		err = 0;
		goto destroy;
	default:
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}
	return 0;

destroy:
	virtio_transport_reset(vsk, pkt);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}
|
|
|
|
|
2019-07-30 23:43:30 +08:00
|
|
|
/* Queue a received RW packet on the socket's rx_queue under rx_lock.
 * Packets that do not fit in the peer-advertised buffer space are dropped.
 * Small payloads may be merged into the tail packet's buffer instead of
 * being queued separately; in every case where the packet itself is not
 * queued it is freed before returning.
 */
static void
virtio_transport_recv_enqueue(struct vsock_sock *vsk,
			      struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool can_enqueue, free_pkt = false;

	pkt->len = le32_to_cpu(pkt->hdr.len);
	pkt->off = 0;

	spin_lock_bh(&vvs->rx_lock);

	/* Drop the packet if it would exceed our receive buffer credit. */
	can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
	if (!can_enqueue) {
		free_pkt = true;
		goto out;
	}

	/* EOR marks the end of a SEQPACKET record. */
	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
		vvs->msg_count++;

	/* Try to copy small packets into the buffer of last packet queued,
	 * to avoid wasting memory queueing the entire buffer with a small
	 * payload.
	 */
	if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
		struct virtio_vsock_pkt *last_pkt;

		last_pkt = list_last_entry(&vvs->rx_queue,
					   struct virtio_vsock_pkt, list);

		/* If there is space in the last packet queued, we copy the
		 * new packet in its buffer. We avoid this if the last packet
		 * queued has VIRTIO_VSOCK_SEQ_EOR set, because this is
		 * delimiter of SEQPACKET record, so 'pkt' is the first packet
		 * of a new record.
		 */
		if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
		    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)) {
			memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
			       pkt->len);
			last_pkt->len += pkt->len;
			free_pkt = true;
			/* Propagate the merged packet's flags (e.g. EOR)
			 * onto the tail packet that now carries its data.
			 */
			last_pkt->hdr.flags |= pkt->hdr.flags;
			goto out;
		}
	}

	list_add_tail(&pkt->list, &vvs->rx_queue);

out:
	spin_unlock_bh(&vvs->rx_lock);
	if (free_pkt)
		virtio_transport_free_pkt(pkt);
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* Handle a packet received on an established connection.
 * Ownership note: RW packets are handed to the rx queue (which frees them
 * itself when needed) and we return early; every other opcode falls through
 * to the common virtio_transport_free_pkt() at the bottom.
 */
static int
virtio_transport_recv_connected(struct sock *sk,
				struct virtio_vsock_pkt *pkt)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int err = 0;

	switch (le16_to_cpu(pkt->hdr.op)) {
	case VIRTIO_VSOCK_OP_RW:
		virtio_transport_recv_enqueue(vsk, pkt);
		sk->sk_data_ready(sk);
		return err;
	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
		/* Caller already refreshed peer credit; wake writers. */
		sk->sk_write_space(sk);
		break;
	case VIRTIO_VSOCK_OP_SHUTDOWN:
		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
			vsk->peer_shutdown |= RCV_SHUTDOWN;
		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
			vsk->peer_shutdown |= SEND_SHUTDOWN;
		/* Peer closed both directions and nothing is left to read:
		 * reset and close unless the socket is already done.
		 */
		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
		    vsock_stream_has_data(vsk) <= 0 &&
		    !sock_flag(sk, SOCK_DONE)) {
			(void)virtio_transport_reset(vsk, NULL);

			virtio_transport_do_close(vsk, true);
		}
		if (le32_to_cpu(pkt->hdr.flags))
			sk->sk_state_change(sk);
		break;
	case VIRTIO_VSOCK_OP_RST:
		virtio_transport_do_close(vsk, true);
		break;
	default:
		err = -EINVAL;
		break;
	}

	virtio_transport_free_pkt(pkt);
	return err;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_transport_recv_disconnecting(struct sock *sk,
|
|
|
|
struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
|
|
|
struct vsock_sock *vsk = vsock_sk(sk);
|
|
|
|
|
|
|
|
if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
|
|
|
|
virtio_transport_do_close(vsk, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_transport_send_response(struct vsock_sock *vsk,
|
|
|
|
struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
|
|
|
struct virtio_vsock_pkt_info info = {
|
|
|
|
.op = VIRTIO_VSOCK_OP_RESPONSE,
|
2016-12-06 12:07:15 +08:00
|
|
|
.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
|
2016-07-28 22:36:32 +08:00
|
|
|
.remote_port = le32_to_cpu(pkt->hdr.src_port),
|
|
|
|
.reply = true,
|
2017-03-15 09:32:14 +08:00
|
|
|
.vsk = vsk,
|
2016-07-28 22:36:32 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
return virtio_transport_send_pkt_info(vsk, &info);
|
|
|
|
}
|
|
|
|
|
2019-11-14 17:57:46 +08:00
|
|
|
/* Refresh our view of the peer's receive credit (buf_alloc/fwd_cnt carried
 * in every packet header) and report whether there is now space to send.
 */
static bool virtio_transport_space_update(struct sock *sk,
					  struct virtio_vsock_pkt *pkt)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	struct virtio_vsock_sock *vvs = vsk->trans;
	bool space_available;

	/* Listener sockets are not associated with any transport, so we are
	 * not able to take the state to see if there is space available in the
	 * remote peer, but since they are only used to receive requests, we
	 * can assume that there is always space available in the other peer.
	 */
	if (!vvs)
		return true;

	/* buf_alloc and fwd_cnt is always included in the hdr */
	spin_lock_bh(&vvs->tx_lock);
	vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
	vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
	space_available = virtio_transport_has_space(vsk);
	spin_unlock_bh(&vvs->tx_lock);
	return space_available;
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* Handle server socket */
/* Process a packet addressed to a listening socket: only REQUEST is valid.
 * Creates a child socket, assigns its transport, and queues it on the
 * listener's accept queue before replying with RESPONSE.
 * Returns 0 on success or a negative errno; on any failure a reset is sent
 * back to the peer.  The caller frees @pkt.
 */
static int
virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
			     struct virtio_transport *t)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_sock *vchild;
	struct sock *child;
	int ret;

	if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
		virtio_transport_reset_no_sock(t, pkt);
		return -EINVAL;
	}

	if (sk_acceptq_is_full(sk)) {
		virtio_transport_reset_no_sock(t, pkt);
		return -ENOMEM;
	}

	child = vsock_create_connected(sk);
	if (!child) {
		virtio_transport_reset_no_sock(t, pkt);
		return -ENOMEM;
	}

	sk_acceptq_added(sk);

	/* Nested class: the listener's lock is already held by our caller. */
	lock_sock_nested(child, SINGLE_DEPTH_NESTING);

	child->sk_state = TCP_ESTABLISHED;

	/* The child's local address is the packet's destination, its remote
	 * address the packet's source.
	 */
	vchild = vsock_sk(child);
	vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
			le32_to_cpu(pkt->hdr.dst_port));
	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
			le32_to_cpu(pkt->hdr.src_port));

	ret = vsock_assign_transport(vchild, vsk);
	/* Transport assigned (looking at remote_addr) must be the same
	 * where we received the request.
	 */
	if (ret || vchild->transport != &t->transport) {
		release_sock(child);
		virtio_transport_reset_no_sock(t, pkt);
		sock_put(child);
		return ret;
	}

	if (virtio_transport_space_update(child, pkt))
		child->sk_write_space(child);

	vsock_insert_connected(vchild);
	vsock_enqueue_accept(sk, child);
	virtio_transport_send_response(vchild, pkt);

	release_sock(child);

	/* Wake up accept() waiters on the listener. */
	sk->sk_data_ready(sk);
	return 0;
}
|
|
|
|
|
2021-06-11 19:12:53 +08:00
|
|
|
/* Return true for the socket types this transport implements
 * (stream and seqpacket); any other header type field is invalid.
 */
static bool virtio_transport_valid_type(u16 type)
{
	switch (type) {
	case VIRTIO_VSOCK_TYPE_STREAM:
	case VIRTIO_VSOCK_TYPE_SEQPACKET:
		return true;
	default:
		return false;
	}
}
|
|
|
|
|
2016-07-28 22:36:32 +08:00
|
|
|
/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
 * lock.
 */
/* Central receive dispatcher: look up the destination socket and route the
 * packet according to the socket state.  Unknown destinations, type
 * mismatches, and already-closed sockets get a reset reply (no-sock reset
 * when no socket could be charged with it).
 */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct virtio_vsock_pkt *pkt)
{
	struct sockaddr_vm src, dst;
	struct vsock_sock *vsk;
	struct sock *sk;
	bool space_available;

	vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
			le32_to_cpu(pkt->hdr.src_port));
	vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
			le32_to_cpu(pkt->hdr.dst_port));

	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
					dst.svm_cid, dst.svm_port,
					le32_to_cpu(pkt->hdr.len),
					le16_to_cpu(pkt->hdr.type),
					le16_to_cpu(pkt->hdr.op),
					le32_to_cpu(pkt->hdr.flags),
					le32_to_cpu(pkt->hdr.buf_alloc),
					le32_to_cpu(pkt->hdr.fwd_cnt));

	if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
		(void)virtio_transport_reset_no_sock(t, pkt);
		goto free_pkt;
	}

	/* The socket must be in connected or bound table
	 * otherwise send reset back
	 */
	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			(void)virtio_transport_reset_no_sock(t, pkt);
			goto free_pkt;
		}
	}

	/* The packet's type must match the socket's type. */
	if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
		(void)virtio_transport_reset_no_sock(t, pkt);
		sock_put(sk);
		goto free_pkt;
	}

	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* Check if sk has been closed before lock_sock */
	if (sock_flag(sk, SOCK_DONE)) {
		(void)virtio_transport_reset_no_sock(t, pkt);
		release_sock(sk);
		sock_put(sk);
		goto free_pkt;
	}

	space_available = virtio_transport_space_update(sk, pkt);

	/* Update CID in case it has changed after a transport reset event */
	vsk->local_addr.svm_cid = dst.svm_cid;

	if (space_available)
		sk->sk_write_space(sk);

	switch (sk->sk_state) {
	case TCP_LISTEN:
		virtio_transport_recv_listen(sk, pkt, t);
		virtio_transport_free_pkt(pkt);
		break;
	case TCP_SYN_SENT:
		virtio_transport_recv_connecting(sk, pkt);
		virtio_transport_free_pkt(pkt);
		break;
	case TCP_ESTABLISHED:
		/* recv_connected() consumes (frees or queues) the packet. */
		virtio_transport_recv_connected(sk, pkt);
		break;
	case TCP_CLOSING:
		virtio_transport_recv_disconnecting(sk, pkt);
		virtio_transport_free_pkt(pkt);
		break;
	default:
		/* e.g. bound-but-not-yet-listening: answer with a reset so
		 * the sender does not wait forever.
		 */
		(void)virtio_transport_reset_no_sock(t, pkt);
		virtio_transport_free_pkt(pkt);
		break;
	}

	release_sock(sk);

	/* Release refcnt obtained when we fetched this socket out of the
	 * bound or connected list.
	 */
	sock_put(sk);
	return;

free_pkt:
	virtio_transport_free_pkt(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
|
|
|
|
|
|
|
|
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
|
|
|
|
{
|
|
|
|
kfree(pkt->buf);
|
|
|
|
kfree(pkt);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
|
|
MODULE_AUTHOR("Asias He");
|
|
|
|
MODULE_DESCRIPTION("common code for virtio vsock");
|