mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-18 18:23:53 +08:00
tun: TUN_VNET_LE support, fix sparse warnings for virtio headers
Pretty straightforward: convert all fields to/from virtio endianness. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Reviewed-by: Jason Wang <jasowang@redhat.com>
This commit is contained in:
parent
e999d6ea2a
commit
56f0dcc5aa
@ -111,7 +111,7 @@ do { \
|
||||
#define TUN_FASYNC IFF_ATTACH_QUEUE
|
||||
|
||||
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
|
||||
IFF_MULTI_QUEUE)
|
||||
IFF_VNET_LE | IFF_MULTI_QUEUE)
|
||||
#define GOODCOPY_LEN 128
|
||||
|
||||
#define FLT_EXACT_COUNT 8
|
||||
@ -205,6 +205,16 @@ struct tun_struct {
|
||||
u32 flow_count;
|
||||
};
|
||||
|
||||
/* Convert a 16-bit virtio header field to CPU byte order.
 * The IFF_VNET_LE flag on this tun device forces little-endian
 * interpretation regardless of the transitional virtio default.
 */
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	bool is_little_endian = (tun->flags & IFF_VNET_LE) != 0;

	return __virtio16_to_cpu(is_little_endian, val);
}
|
||||
|
||||
/* Convert a CPU-order 16-bit value into the virtio byte order used by
 * this tun device's vnet header (little-endian when IFF_VNET_LE is set).
 */
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	bool is_little_endian = (tun->flags & IFF_VNET_LE) != 0;

	return __cpu_to_virtio16(is_little_endian, val);
}
|
||||
|
||||
static inline u32 tun_hashfn(u32 rxhash)
|
||||
{
|
||||
return rxhash & 0x3ff;
|
||||
@ -1053,10 +1063,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
return -EFAULT;
|
||||
|
||||
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
|
||||
gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
|
||||
gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
|
||||
tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
|
||||
gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
|
||||
|
||||
if (gso.hdr_len > len)
|
||||
if (tun16_to_cpu(tun, gso.hdr_len) > len)
|
||||
return -EINVAL;
|
||||
offset += tun->vnet_hdr_sz;
|
||||
}
|
||||
@ -1064,7 +1074,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
|
||||
align += NET_IP_ALIGN;
|
||||
if (unlikely(len < ETH_HLEN ||
|
||||
(gso.hdr_len && gso.hdr_len < ETH_HLEN)))
|
||||
(gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1075,7 +1085,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
* enough room for skb expand head in case it is used.
|
||||
* The rest of the buffer is mapped from userspace.
|
||||
*/
|
||||
copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
|
||||
copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
|
||||
if (copylen > good_linear)
|
||||
copylen = good_linear;
|
||||
linear = copylen;
|
||||
@ -1085,10 +1095,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
|
||||
if (!zerocopy) {
|
||||
copylen = len;
|
||||
if (gso.hdr_len > good_linear)
|
||||
if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
|
||||
linear = good_linear;
|
||||
else
|
||||
linear = gso.hdr_len;
|
||||
linear = tun16_to_cpu(tun, gso.hdr_len);
|
||||
}
|
||||
|
||||
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
|
||||
@ -1115,8 +1125,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
}
|
||||
|
||||
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
|
||||
if (!skb_partial_csum_set(skb, gso.csum_start,
|
||||
gso.csum_offset)) {
|
||||
if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
|
||||
tun16_to_cpu(tun, gso.csum_offset))) {
|
||||
tun->dev->stats.rx_frame_errors++;
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
@ -1184,7 +1194,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
||||
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
|
||||
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
|
||||
|
||||
skb_shinfo(skb)->gso_size = gso.gso_size;
|
||||
skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
|
||||
if (skb_shinfo(skb)->gso_size == 0) {
|
||||
tun->dev->stats.rx_frame_errors++;
|
||||
kfree_skb(skb);
|
||||
@ -1276,8 +1286,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
||||
struct skb_shared_info *sinfo = skb_shinfo(skb);
|
||||
|
||||
/* This is a hint as to how much should be linear. */
|
||||
gso.hdr_len = skb_headlen(skb);
|
||||
gso.gso_size = sinfo->gso_size;
|
||||
gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
|
||||
gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
|
||||
if (sinfo->gso_type & SKB_GSO_TCPV4)
|
||||
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
|
||||
else if (sinfo->gso_type & SKB_GSO_TCPV6)
|
||||
@ -1285,12 +1295,12 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
||||
else {
|
||||
pr_err("unexpected GSO type: "
|
||||
"0x%x, gso_size %d, hdr_len %d\n",
|
||||
sinfo->gso_type, gso.gso_size,
|
||||
gso.hdr_len);
|
||||
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
|
||||
tun16_to_cpu(tun, gso.hdr_len));
|
||||
print_hex_dump(KERN_ERR, "tun: ",
|
||||
DUMP_PREFIX_NONE,
|
||||
16, 1, skb->head,
|
||||
min((int)gso.hdr_len, 64), true);
|
||||
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
|
||||
WARN_ON_ONCE(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1301,9 +1311,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
||||
gso.csum_start = skb_checksum_start_offset(skb) +
|
||||
vlan_hlen;
|
||||
gso.csum_offset = skb->csum_offset;
|
||||
gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
|
||||
vlan_hlen);
|
||||
gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
|
||||
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
||||
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
||||
} /* else everything is zero */
|
||||
|
Loading…
Reference in New Issue
Block a user