2019-05-19 21:51:43 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C)2002 USAGI/WIDE Project
|
2007-02-09 22:24:49 +08:00
|
|
|
*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Authors
|
|
|
|
*
|
2007-02-09 22:24:49 +08:00
|
|
|
* Mitsuru KANDA @USAGI : IPv6 Support
|
2014-08-25 04:53:10 +08:00
|
|
|
* Kazunori MIYAZAWA @USAGI :
|
|
|
|
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
|
2007-02-09 22:24:49 +08:00
|
|
|
*
|
2014-08-25 04:53:10 +08:00
|
|
|
* This file is derived from net/ipv4/esp.c
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
|
2012-05-15 22:11:53 +08:00
|
|
|
#define pr_fmt(fmt) "IPv6: " fmt
|
|
|
|
|
2008-01-29 11:35:05 +08:00
|
|
|
#include <crypto/aead.h>
|
|
|
|
#include <crypto/authenc.h>
|
2006-07-30 13:41:01 +08:00
|
|
|
#include <linux/err.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <net/ip.h>
|
|
|
|
#include <net/xfrm.h>
|
|
|
|
#include <net/esp.h>
|
2007-10-27 13:53:58 +08:00
|
|
|
#include <linux/scatterlist.h>
|
2005-10-11 12:11:08 +08:00
|
|
|
#include <linux/kernel.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/pfkeyv2.h>
|
|
|
|
#include <linux/random.h>
|
2008-01-29 11:35:05 +08:00
|
|
|
#include <linux/slab.h>
|
2007-10-10 04:33:35 +08:00
|
|
|
#include <linux/spinlock.h>
|
2020-04-27 23:59:34 +08:00
|
|
|
#include <net/ip6_checksum.h>
|
2012-06-16 05:54:11 +08:00
|
|
|
#include <net/ip6_route.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <net/icmp.h>
|
|
|
|
#include <net/ipv6.h>
|
2005-12-27 12:43:12 +08:00
|
|
|
#include <net/protocol.h>
|
2020-04-27 23:59:34 +08:00
|
|
|
#include <net/udp.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/icmpv6.h>
|
2020-04-27 23:59:35 +08:00
|
|
|
#include <net/tcp.h>
|
|
|
|
#include <net/espintcp.h>
|
|
|
|
#include <net/inet6_hashtables.h>
|
2024-04-11 03:05:01 +08:00
|
|
|
#include <linux/skbuff_ref.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-01-17 17:23:03 +08:00
|
|
|
#include <linux/highmem.h>
|
|
|
|
|
2008-01-29 11:35:05 +08:00
|
|
|
/* Per-skb ESP state stashed in skb->cb for the duration of the crypto
 * operation.  Must fit inside skb->cb alongside the generic xfrm cb.
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;	/* must come first; shared xfrm cb layout */
	void *tmp;			/* esp_alloc_tmp() buffer, freed on completion */
};
|
|
|
|
|
2020-04-27 23:59:34 +08:00
|
|
|
/* Extra per-packet output state needed when extended sequence numbers
 * (ESN) are in use; stored at the front of the esp_alloc_tmp() buffer.
 */
struct esp_output_extra {
	__be32 seqhi;	/* saved SPI word displaced by the ESN high bits */
	u32 esphoff;	/* offset of the ESP header from the transport header */
};
|
|
|
|
|
2008-01-29 11:35:05 +08:00
|
|
|
/* Access the ESP control block overlaid on skb->cb. */
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
|
|
|
|
|
|
|
|
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	/* Optional ESN high-bits area comes first. */
	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		/* Leave room so the request below can be aligned to the
		 * tfm context alignment without overlapping the IV.
		 */
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	/* GFP_ATOMIC: called from the transmit path, may be in softirq. */
	return kmalloc(len, GFP_ATOMIC);
}
|
|
|
|
|
2020-04-27 23:59:34 +08:00
|
|
|
/* Locate the esp_output_extra area at the (aligned) start of the tmp
 * buffer allocated by esp_alloc_tmp().
 */
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
|
|
|
|
|
|
|
|
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
|
2008-01-29 11:35:05 +08:00
|
|
|
{
|
|
|
|
return crypto_aead_ivsize(aead) ?
|
2011-03-08 08:07:51 +08:00
|
|
|
PTR_ALIGN((u8 *)tmp + seqhilen,
|
|
|
|
crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
|
2008-01-29 11:35:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Locate the aead_request inside the tmp buffer (right after the IV,
 * aligned to the tfm context alignment) and bind it to the transform.
 */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req =
		(void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				  crypto_tfm_ctx_alignment());

	aead_request_set_tfm(req, aead);

	return req;
}
|
|
|
|
|
|
|
|
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
|
|
|
|
struct aead_request *req)
|
|
|
|
{
|
|
|
|
return (void *)ALIGN((unsigned long)(req + 1) +
|
|
|
|
crypto_aead_reqsize(aead),
|
|
|
|
__alignof__(struct scatterlist));
|
|
|
|
}
|
|
|
|
|
2024-03-08 23:26:00 +08:00
|
|
|
/* Drop the page references taken for an out-of-place (src != dst)
 * encryption.  Recomputes the tmp-buffer layout to find the request
 * and, through it, the source scatterlist.
 */
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	/* Must mirror the layout used in esp6_output_tail(). */
	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(sg_page(sg), skb->pp_recycle);
}
|
|
|
|
|
2020-04-27 23:59:35 +08:00
|
|
|
#ifdef CONFIG_INET6_ESPINTCP
|
|
|
|
/* Carrier for deferring a sock_put() of a cached encap socket until
 * after an RCU grace period.
 */
struct esp_tcp_sk {
	struct sock *sk;	/* socket whose reference is released in the callback */
	struct rcu_head rcu;
};
|
|
|
|
|
|
|
|
/* RCU callback: release the deferred socket reference and the carrier. */
static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}
|
|
|
|
|
|
|
|
/* Find (and cache on x->encap_sk) the established TCP socket carrying
 * ESP-in-TCP for this state.  Called under rcu_read_lock().  Returns a
 * valid socket or an ERR_PTR (-ENOMEM, -ENOENT, -EINVAL, -EREMCHG).
 */
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	/* Fast path: cached socket still established. */
	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	/* Slow path: snapshot the encap ports under x->lock and, if the
	 * stale cached socket is still current, drop it via call_rcu so
	 * concurrent readers stay safe.
	 */
	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	/* Look up the established TCP socket by the state's addresses
	 * and the snapshotted ports; takes a reference on success.
	 */
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	/* The socket must have the espintcp ULP attached. */
	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	/* Re-check under the lock: the encap ports may have changed, or
	 * another CPU may have cached a socket meanwhile.
	 */
	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		/* Ports changed under us: fall back to the concurrent
		 * winner, or report the remote endpoint change.
		 */
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		/* Someone else already cached this socket; drop our ref. */
		sock_put(sk);
	} else {
		/* Cache our lookup result (keeps the reference). */
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
|
|
|
|
|
|
|
|
/* Hand a fully built ESP-in-TCP skb to the encap socket: queue it if
 * the socket is owned by user context, push it directly otherwise.
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}
|
|
|
|
|
|
|
|
static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct dst_entry *dst = skb_dst(skb);
|
|
|
|
struct xfrm_state *x = dst->xfrm;
|
|
|
|
|
|
|
|
return esp_output_tcp_finish(x, skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Defer the ESP-in-TCP transmit through the xfrm trans queue so it
 * runs in a context where the TCP socket can safely be locked.
 */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing. It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
|
|
|
|
#else
|
|
|
|
/* Stub used when CONFIG_INET6_ESPINTCP is disabled: ESP-in-TCP output
 * is unsupported, so consume the skb and report the error.
 */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
|
|
|
|
#endif
|
|
|
|
|
2020-04-27 23:59:34 +08:00
|
|
|
/* Fill in the outer UDP checksum after encryption, if this packet is
 * UDP-encapsulated (the mac-header byte holds the outer protocol).
 */
static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		/* A computed checksum of zero must be sent as all-ones. */
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
|
|
|
|
|
2023-02-06 18:22:40 +08:00
|
|
|
/* AEAD completion callback for ESP output.  Frees the crypto tmp
 * buffer, fixes up the encap checksum, then either resumes hardware
 * offload or continues the regular xfrm output path.
 */
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* Offload resume path: the state is the last entry of
		 * the skb's sec_path, not the dst.
		 */
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		/* ESP-in-TCP packets are handed to the encap socket;
		 * everything else resumes the normal output path.
		 */
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
|
|
|
|
|
2015-05-27 16:03:47 +08:00
|
|
|
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	/* Undo the 4-byte ESN shift done by esp_output_set_esn():
	 * the SPI slot currently holds the sequence number, and the
	 * original SPI was saved in the tmp buffer.
	 */
	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
|
|
|
|
|
|
|
|
static void esp_output_restore_header(struct sk_buff *skb)
|
|
|
|
{
|
2020-04-27 23:59:34 +08:00
|
|
|
void *tmp = ESP_SKB_CB(skb)->tmp;
|
|
|
|
struct esp_output_extra *extra = esp_tmp_extra(tmp);
|
|
|
|
|
|
|
|
esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
|
|
|
|
sizeof(__be32));
|
2015-05-27 16:03:47 +08:00
|
|
|
}
|
|
|
|
|
2017-01-17 17:23:03 +08:00
|
|
|
/* Prepare the ESP header for transmission, shifting it down 4 bytes
 * when ESN is enabled so the sequence-number high bits can be fed to
 * the AEAD as part of the associated data.  Returns the (possibly
 * moved) header; records undo information in @extra.
 */
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* Offloaded packets carry the sequence number in xo. */
		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		/* Remember where the header was and what the shift
		 * will clobber, so esp_restore_header() can undo it.
		 */
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
|
|
|
|
|
2023-02-06 18:22:40 +08:00
|
|
|
/* AEAD completion callback for ESN states: put the ESP header back in
 * place before running the common completion handler.
 */
static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}
|
|
|
|
|
2020-04-27 23:59:34 +08:00
|
|
|
/* Build the outer UDP header for ESP-in-UDP encapsulation in front of
 * the ESP header.  The checksum is left zero here and filled in after
 * encryption by esp_output_encap_csum().  Returns the new ESP header
 * location, or ERR_PTR(-EMSGSIZE) if the payload exceeds a UDP length.
 */
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	unsigned int len;

	/* Length of everything the UDP header will cover, including the
	 * ESP trailer still to be appended.
	 */
	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	/* Outer protocol becomes UDP; the real next-proto is in esp->proto. */
	*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}
|
|
|
|
|
2020-04-27 23:59:35 +08:00
|
|
|
#ifdef CONFIG_INET6_ESPINTCP
|
|
|
|
/* Build the 2-byte espintcp length prefix for ESP-in-TCP encapsulation
 * and verify that an espintcp socket exists for this state.  Returns
 * the new ESP header location or an ERR_PTR.
 */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	/* Fail early if no usable encap socket can be found; the skb is
	 * actually handed to the socket later, in esp_output_tail_tcp().
	 */
	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
|
|
|
|
#else
|
|
|
|
/* Stub used when CONFIG_INET6_ESPINTCP is disabled. */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
|
|
|
|
#endif
|
|
|
|
|
2020-04-27 23:59:34 +08:00
|
|
|
/* Insert the NAT-traversal encapsulation header (UDP or TCP) for this
 * state and update esp->esph to point past it.  Returns 0 or a
 * negative errno.
 */
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	/* Snapshot the encap parameters under the state lock; they can
	 * be updated concurrently (e.g. by NAT keepalive handling).
	 */
	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
|
|
|
|
|
2017-04-14 16:06:42 +08:00
|
|
|
/* First half of ESP output: add the optional encap header and append
 * the ESP trailer (TFC padding, pad length, next header, ICV space).
 * Tries to avoid copying: use existing tailroom, or attach the trailer
 * as a page fragment; falls back to skb_cow_data() otherwise.
 * Returns the number of fragments for the scatterlist, or a negative
 * errno.
 */
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	/* Oversized trailers or frag data don't fit the page-frag
	 * fast paths below.
	 */
	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Fast path: trailer fits in existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			/* Attach the trailer as a new page fragment and
			 * encrypt out-of-place into fresh pages later.
			 */
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the per-state page frag. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* One extra sg slot for the skb head. */
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* skb_cow_data() may relocate the head; keep the ESP header
	 * position as an offset so it can be re-derived afterwards.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-04-14 16:06:42 +08:00
|
|
|
/* Second half of ESP output: lay out the crypto tmp buffer, build the
 * source (and, for out-of-place, destination) scatterlists, and run
 * the AEAD encryption.  Returns 0, -EINPROGRESS (async, skb consumed),
 * or a negative errno.
 */
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	/* ESN adds the sequence-number high bits to the AAD and needs
	 * extra room at the front of the tmp buffer.
	 */
	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2 sg entries: one for the head, one spare for the dst list. */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		/* Out-of-place: encrypt into fresh pages and swap them
		 * into the skb in place of the old frags.
		 */
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Seed the IV with the low bytes of the 64-bit sequence number. */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: the completion callback owns tmp and the skb. */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
|
|
|
|
|
|
|
|
/* xfrm type ->output handler: compute padding/trailer sizes, stamp the
 * SPI and sequence number, and drive the two-stage ESP output.
 */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	/* Save the inner protocol and claim the slot for ESP. */
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	/* Traffic flow confidentiality: pad short packets up to padto. */
	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	/* Ciphertext is padded to the cipher block size (min 4), plus
	 * the 2-byte pad-length/next-header trailer and the ICV.
	 */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
|
2008-01-29 11:35:05 +08:00
|
|
|
|
2017-08-30 16:30:39 +08:00
|
|
|
/* Strip the ESP trailer (padding, pad length, next header, ICV) from a
 * decrypted packet.  Returns the inner next-header value, or a
 * negative errno on malformed padding.
 */
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	/* Pad-length and next-header bytes sit just before the ICV. */
	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	/* Keep a hardware CHECKSUM_COMPLETE value consistent with the
	 * bytes being trimmed off.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}
|
|
|
|
|
|
|
|
/* Common tail of IPv6 ESP input processing, run after decryption completes
 * (synchronously, or from the async crypto callback).  Frees the per-packet
 * scratch buffer, strips the ESP trailer, handles NAT-T source-mapping
 * updates for UDP/TCP encapsulation, and repositions the skb headers.
 *
 * Returns the inner protocol number on success (for the xfrm input path),
 * or a negative errno on failure.
 */
int esp6_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int hdr_len = skb_network_header_len(skb);

        /* The tmp scratch area is only allocated on the non-offloaded
         * path; hardware-offloaded packets (CRYPTO_DONE) never had one.
         */
        if (!xo || !(xo->flags & CRYPTO_DONE))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        /* On success this yields the next-header value from the trailer. */
        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        if (x->encap) {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int offset = skb_network_offset(skb) + sizeof(*ip6h);
                struct xfrm_encap_tmpl *encap = x->encap;
                u8 nexthdr = ip6h->nexthdr;
                __be16 frag_off, source;
                struct udphdr *uh;
                struct tcphdr *th;

                /* Note: the result may legitimately be negative here, since
                 * skb->data already points at the ESP header while the
                 * network header is further back; only the explicit -1
                 * failure value indicates an error.
                 */
                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
                if (offset == -1) {
                        err = -EINVAL;
                        goto out;
                }

                /* Both views alias the same encapsulation header; which one
                 * is valid depends on encap_type below.
                 */
                uh = (void *)(skb->data + offset);
                th = (void *)(skb->data + offset);
                hdr_len += offset;

                switch (x->encap->encap_type) {
                case TCP_ENCAP_ESPINTCP:
                        source = th->source;
                        break;
                case UDP_ENCAP_ESPINUDP:
                        source = uh->source;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        err = -EINVAL;
                        goto out;
                }

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
                    source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
                        km_new_mapping(x, &ipaddr, source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        /* Pull the outer IPv6 header and the ESP header + IV off the skb,
         * keeping any CHECKSUM_COMPLETE value consistent.
         */
        skb_postpull_rcsum(skb, skb_network_header(skb),
                           skb_network_header_len(skb));
        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2023-02-06 18:22:40 +08:00
|
|
|
/* Async crypto completion callback for ESP decryption: finish the input
 * processing and resume the xfrm input path with the final verdict.
 */
static void esp_input_done(void *data, int err)
{
        xfrm_input_resume(data, esp6_input_done2(data, err));
}
|
|
|
|
|
2015-05-27 16:03:47 +08:00
|
|
|
/* Undo esp_input_set_header(): restore the original SPI/sequence layout
 * and drop the 4 extra bytes that were pushed to hold the high ESN bits.
 */
static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}
|
|
|
|
|
2017-01-17 17:23:03 +08:00
|
|
|
/* Prepare the ESP header for decryption when extended sequence numbers
 * are in use.  *seqhi receives the original SPI so it can be restored
 * later by esp_input_restore_header().
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                struct ip_esp_hdr *esph = skb_push(skb, 4);

                /* Shuffle: saved SPI -> *seqhi, seq_no -> SPI slot, and
                 * the upper 32 sequence bits into the seq_no slot so the
                 * AAD covers spi || seq-hi || seq-lo.
                 */
                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}
|
|
|
|
|
2023-02-06 18:22:40 +08:00
|
|
|
/* Async completion callback used when ESN is enabled: first undo the
 * ESN header shuffle, then continue via the common completion path.
 */
static void esp_input_done_esn(void *data, int err)
{
        esp_input_restore_header((struct sk_buff *)data);
        esp_input_done(data, err);
}
|
|
|
|
|
2006-04-01 16:52:46 +08:00
|
|
|
/* xfrm_type input handler for IPv6 ESP: validate the packet, build an
 * AEAD decrypt request over the skb (as a scatterlist), and either
 * complete synchronously via esp6_input_done2() or return -EINPROGRESS
 * and let the async callback finish the job.
 */
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        /* Must have at least the ESP header plus the IV linear. */
        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        /* No room for any ciphertext/trailer. */
        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        /* With ESN the AAD additionally covers the high sequence bits. */
        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        /* Avoid the expensive skb_cow_data() copy when the skb is not
         * cloned and has a simple layout (linear, or paged without a
         * frag list) — decryption can then happen in place.
         */
        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

skip_cow:
        ret = -ENOMEM;
        /* One scratch buffer holds the ESN word, IV, request and sg. */
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        /* May shift the header by 4 bytes for ESN; undone below / in
         * the esn completion callback.
         */
        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        /* The IV is decrypted together with the payload (elen + ivlen). */
        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp6_input_done2(skb, ret);

out:
        return ret;
}
|
|
|
|
|
2014-03-14 14:28:07 +08:00
|
|
|
/* ICMPv6 error handler for ESP: react to packet-too-big and redirect
 * notifications by updating the cached route/PMTU for the matching SA.
 * Always returns 0.
 */
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *xs;

        /* Only PMTU and redirect notifications are acted upon. */
        if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT)
                return 0;

        /* The error must reference a known inbound ESP state. */
        xs = xfrm_state_lookup(net, skb->mark,
                               (const xfrm_address_t *)&ip6h->daddr,
                               esph->spi, IPPROTO_ESP, AF_INET6);
        if (!xs)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(xs);

        return 0;
}
|
|
|
|
|
|
|
|
static void esp6_destroy(struct xfrm_state *x)
|
|
|
|
{
|
2013-10-18 18:09:05 +08:00
|
|
|
struct crypto_aead *aead = x->data;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-10-18 18:09:05 +08:00
|
|
|
if (!aead)
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
|
2013-10-18 18:09:05 +08:00
|
|
|
crypto_free_aead(aead);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2022-09-27 23:45:31 +08:00
|
|
|
/* Set up the crypto transform for a state configured with a true AEAD
 * algorithm (x->aead): compose the "geniv(alg)" name, allocate the
 * transform, and program key and ICV length.  Errors are reported to
 * userspace via extack.  Returns 0 or a negative errno.
 */
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
                NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                return -ENAMETOOLONG;
        }

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        /* Stored before keying so esp6_destroy() can free it on any
         * later failure.
         */
        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

        return 0;

error:
        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
        return err;
}
|
|
|
|
|
2022-09-27 23:45:31 +08:00
|
|
|
/* Set up the crypto transform for a state configured with separate
 * encryption (x->ealg) and optional authentication (x->aalg) algorithms,
 * by wrapping them in an authenc()/authencesn() AEAD.  The combined key
 * is packed as an rtattr parameter block (enc key length) followed by
 * the auth key and then the enc key, per the crypto authenc key format.
 * Returns 0 or a negative errno; failures are reported via extack.
 */
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -ENAMETOOLONG;

        /* ESN states need the authencesn() wrapper so the high sequence
         * bits are authenticated; otherwise plain authenc() is used.
         * A missing auth algorithm falls back to digest_null.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
                        NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                        goto error;
                }
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
                        NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                        goto error;
                }
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead)) {
                NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                goto error;
        }

        /* Stored before keying so esp6_destroy() can free it later. */
        x->data = aead;

        /* rtattr param block + auth key (if any) + enc key. */
        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                /* The transform's native digest size must match the
                 * algorithm descriptor before truncating the ICV.
                 */
                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                        goto free_key;
                }
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
|
|
|
|
|
2022-09-27 23:45:29 +08:00
|
|
|
/* xfrm_type init handler for IPv6 ESP: set up the AEAD transform
 * (pure AEAD or authenc wrapper) and compute the per-packet header and
 * trailer space this state will need, including mode- and
 * encapsulation-specific overhead.  Returns 0 or a negative errno.
 */
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead) {
                err = esp_init_aead(x, extack);
        } else if (x->ealg) {
                err = esp_init_authenc(x, extack);
        } else {
                NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
                err = -EINVAL;
        }

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                /* Extra room for the BEET pseudo header and the v4->v6
                 * header size difference when the selector is not IPv6.
                 */
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        default:
                /* Unknown modes are treated like transport mode
                 * (deliberate fallthrough).
                 */
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        }

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
#ifdef CONFIG_INET6_ESPINTCP
                case TCP_ENCAP_ESPINTCP:
                        /* only the length field, TCP encap is done by
                         * the socket
                         */
                        x->props.header_len += 2;
                        break;
#endif
                }
        }

        /* Worst-case trailer: padding to block size (min 4), pad-length
         * and next-header bytes, plus the ICV.
         */
        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}
|
|
|
|
|
2014-03-14 14:28:07 +08:00
|
|
|
/* Per-protocol receive callback required by xfrm6_protocol; ESP has
 * nothing extra to do here, so always report success.
 */
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}
|
|
|
|
|
2014-08-25 04:53:11 +08:00
|
|
|
/* xfrm_type descriptor registering ESP handling for AF_INET6.
 * REPLAY_PROT enables the xfrm core's anti-replay machinery for
 * states of this type.
 */
static const struct xfrm_type esp6_type = {
        .owner		= THIS_MODULE,
        .proto		= IPPROTO_ESP,
        .flags		= XFRM_TYPE_REPLAY_PROT,
        .init_state	= esp6_init_state,
        .destructor	= esp6_destroy,
        .input		= esp6_input,
        .output		= esp6_output,
};
|
|
|
|
|
2014-03-14 14:28:07 +08:00
|
|
|
/* IPPROTO_ESP protocol hook: incoming ESP packets enter via xfrm6_rcv /
 * xfrm_input, ICMPv6 errors are routed to esp6_err.  Priority 0 is the
 * base (non-offload) handler.
 */
static struct xfrm6_protocol esp6_protocol = {
        .handler	=	xfrm6_rcv,
        .input_handler	=	xfrm_input,
        .cb_handler	=	esp6_rcv_cb,
        .err_handler	=	esp6_err,
        .priority	=	0,
};
|
|
|
|
|
|
|
|
/* Module init: register the ESP xfrm type for AF_INET6, then hook the
 * IPPROTO_ESP protocol handler.  The type registration is unwound if
 * the protocol hookup fails.
 */
static int __init esp6_init(void)
{
        int rc;

        rc = xfrm_register_type(&esp6_type, AF_INET6);
        if (rc < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }

        rc = xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP);
        if (rc < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}
|
|
|
|
|
|
|
|
/* Module exit: tear down in reverse order of esp6_init() — protocol
 * handler first, then the xfrm type.
 */
static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        xfrm_unregister_type(&esp6_type, AF_INET6);
}
|
|
|
|
|
|
|
|
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
MODULE_LICENSE("GPL");
/* Allows the xfrm core to autoload this module when an IPv6 ESP state
 * is configured.
 */
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
|