commit e7096c131e
WireGuard is a layer 3 secure networking tunnel made specifically for the kernel, which aims to be much simpler and easier to audit than IPsec. Extensive documentation and description of the protocol and considerations, along with formal proofs of the cryptography, are available at:

* https://www.wireguard.com/
* https://www.wireguard.com/papers/wireguard.pdf

This commit implements WireGuard as a simple network device driver, accessible in the usual RTNL way used by virtual network drivers. It makes use of the udp_tunnel APIs, GRO, GSO, NAPI, and the usual set of networking subsystem APIs. It has a somewhat novel multicore queueing system designed for maximum throughput and minimal latency of encryption operations, but it is implemented modestly using workqueues and NAPI. Configuration is done via generic Netlink, and following a review from the Netlink maintainer a year ago, several high profile userspace tools have already implemented the API.

This commit also comes with several different tests, both in-kernel tests and out-of-kernel tests based on network namespaces, taking advantage of the fact that sockets used by WireGuard intentionally stay in the namespace in which the WireGuard interface was originally created, exactly like the semantics of userspace tun devices. See wireguard.com/netns/ for pictures and examples.

The source code is fairly short, but rather than combining everything into a single file, WireGuard is developed as cleanly separable files, making auditing and comprehension easier. Things are laid out as follows:

* noise.[ch], cookie.[ch], messages.h: These implement the bulk of the cryptographic aspects of the protocol, and are mostly data-only in nature, taking in buffers of bytes and spitting out buffers of bytes. They also handle reference counting for their various shared pieces of data, like keys and key lists.

* ratelimiter.[ch]: Used as an integral part of cookie.[ch] for ratelimiting certain types of cryptographic operations in accordance with particular WireGuard semantics.

* allowedips.[ch], peerlookup.[ch]: The main lookup structures of WireGuard, the former being trie-like with particular semantics, an integral part of the design of the protocol, and the latter just being nice helper functions around the various hashtables we use.

* device.[ch]: Implementation of functions for the netdevice and for rtnl, responsible for maintaining the life of a given interface and wiring it up to the rest of WireGuard.

* peer.[ch]: Each interface has a list of peers, with helper functions available here for creation, destruction, and reference counting.

* socket.[ch]: Implementation of functions related to udp_socket and the general set of kernel socket APIs, for sending and receiving ciphertext UDP packets, and taking care of WireGuard-specific sticky socket routing semantics for the automatic roaming.

* netlink.[ch]: Userspace API entry point for configuring WireGuard peers and devices. The API has been implemented by several userspace tools and network management utilities, and the WireGuard project distributes the basic wg(8) tool.

* queueing.[ch]: Shared functions on the rx and tx paths for handling the various queues used in the multicore algorithms.

* send.c: Handles encrypting outgoing packets in parallel on multiple cores, before sending them in order on a single core, via workqueues and ring buffers. Also handles sending handshake and cookie messages as part of the protocol, in parallel.

* receive.c: Handles decrypting incoming packets in parallel on multiple cores, before passing them off in order to be ingested via the rest of the networking subsystem with GRO via the typical NAPI poll function. Also handles receiving handshake and cookie messages as part of the protocol, in parallel.

* timers.[ch]: Uses the timer wheel to implement protocol-particular event timeouts, and gives a set of very simple event-driven entry point functions for callers.

* main.c, version.h: Initialization and deinitialization of the module.

* selftest/*.h: Runtime unit tests for some of the most security sensitive functions.

* tools/testing/selftests/wireguard/netns.sh: Aforementioned testing script using network namespaces.

This commit aims to be as self-contained as possible, implementing WireGuard as a standalone module not needing much special handling or coordination from the network subsystem. I expect future optimizations to the network stack to positively improve WireGuard, and vice-versa, but for the time being, this exists as intentionally standalone.

We introduce a menu option for CONFIG_WIREGUARD, as well as providing a verbose debug log and self-tests via CONFIG_WIREGUARD_DEBUG.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: David Miller <davem@davemloft.net>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
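The listing that follows is the ratelimiter.[ch] implementation described above. Its core decision is a token bucket with the burst folded into the bucket depth rather than into the rate. As a minimal standalone sketch of that policy, and not the kernel code itself, the fragment below reuses the file's constants in plain userspace C; clock_gettime(CLOCK_MONOTONIC) stands in for the kernel's ktime_get_coarse_boottime_ns(), and struct bucket and allow() are illustrative names only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL
#define PACKETS_PER_SECOND 20
#define PACKETS_BURSTABLE 5
#define PACKET_COST (NSEC_PER_SEC / PACKETS_PER_SECOND)
#define TOKEN_MAX (PACKET_COST * PACKETS_BURSTABLE)

struct bucket {
        uint64_t last_time_ns;
        uint64_t tokens;
};

static uint64_t now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/* Tokens accrue at one per elapsed nanosecond, capped at TOKEN_MAX;
 * each allowed packet spends PACKET_COST tokens. */
static bool allow(struct bucket *b)
{
        uint64_t now = now_ns();
        uint64_t tokens = b->tokens + (now - b->last_time_ns);

        if (tokens > TOKEN_MAX)
                tokens = TOKEN_MAX;
        b->last_time_ns = now;
        if (tokens >= PACKET_COST) {
                b->tokens = tokens - PACKET_COST;
                return true;
        }
        b->tokens = tokens;
        return false;
}

int main(void)
{
        /* A fresh bucket starts full, so the first PACKETS_BURSTABLE
         * packets pass immediately. */
        struct bucket b = { .last_time_ns = now_ns(), .tokens = TOKEN_MAX };

        for (int i = 0; i < 8; ++i)
                printf("packet %d: %s\n", i, allow(&b) ? "allowed" : "dropped");
        return 0;
}

Run in a tight loop like this, the first five packets print "allowed" and the remaining three "dropped", since effectively no tokens accrue within a few microseconds.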
ratelimiter.c (224 lines, 5.9 KiB, C)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "ratelimiter.h"
#include <linux/siphash.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
static struct kmem_cache *entry_cache;
static hsiphash_key_t key;
static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
static DEFINE_MUTEX(init_lock);
static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
static atomic_t total_entries = ATOMIC_INIT(0);
static unsigned int max_entries, table_size;
static void wg_ratelimiter_gc_entries(struct work_struct *);
static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
static struct hlist_head *table_v4;
#if IS_ENABLED(CONFIG_IPV6)
static struct hlist_head *table_v6;
#endif
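/*
 * Lifetime note: this ratelimiter is a single module-wide instance shared
 * by all WireGuard devices. wg_ratelimiter_init() and wg_ratelimiter_uninit()
 * below reference count their callers through init_refcnt under init_lock,
 * so the tables are allocated by the first user and freed by the last.
 */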
struct ratelimiter_entry {
        u64 last_time_ns, tokens, ip;
        void *net;
        spinlock_t lock;
        struct hlist_node hash;
        struct rcu_head rcu;
};
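/*
 * Field notes for the entry above: tokens and last_time_ns are both in
 * nanoseconds and are guarded by the per-entry lock; ip holds either the
 * IPv4 saddr or the top 64 bits of the IPv6 saddr (the whole /64); net is
 * compared as a full pointer in wg_ratelimiter_allow(), even though only
 * its bottom 32 bits feed the hash; hash chains the entry into table_v4
 * or table_v6, and rcu defers freeing past concurrent lockless readers.
 */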
enum {
        PACKETS_PER_SECOND = 20,
        PACKETS_BURSTABLE = 5,
        PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
        TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
};
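/*
 * Worked numbers: PACKET_COST = NSEC_PER_SEC / 20 = 50,000,000 ns, and
 * TOKEN_MAX = 50,000,000 * 5 = 250,000,000 ns. Tokens accrue at one per
 * elapsed nanosecond, so a full bucket holds exactly five packets' worth,
 * yielding a sustained 20 packets per second with bursts of up to 5.
 */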
static void entry_free(struct rcu_head *rcu)
{
        kmem_cache_free(entry_cache,
                        container_of(rcu, struct ratelimiter_entry, rcu));
        atomic_dec(&total_entries);
}

static void entry_uninit(struct ratelimiter_entry *entry)
{
        hlist_del_rcu(&entry->hash);
        call_rcu(&entry->rcu, entry_free);
}
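/*
 * Removal is two-phase RCU teardown: entry_uninit() unlinks the entry from
 * its hash chain (its callers hold table_lock), and entry_free() runs only
 * after a grace period, so lockless readers in wg_ratelimiter_allow() never
 * touch freed memory.
 */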
/* Calling this function with a NULL work uninits all entries. */
static void wg_ratelimiter_gc_entries(struct work_struct *work)
{
        const u64 now = ktime_get_coarse_boottime_ns();
        struct ratelimiter_entry *entry;
        struct hlist_node *temp;
        unsigned int i;

        for (i = 0; i < table_size; ++i) {
                spin_lock(&table_lock);
                hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
                        if (unlikely(!work) ||
                            now - entry->last_time_ns > NSEC_PER_SEC)
                                entry_uninit(entry);
                }
#if IS_ENABLED(CONFIG_IPV6)
                hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
                        if (unlikely(!work) ||
                            now - entry->last_time_ns > NSEC_PER_SEC)
                                entry_uninit(entry);
                }
#endif
                spin_unlock(&table_lock);
                if (likely(work))
                        cond_resched();
        }
        if (likely(work))
                queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
}
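/*
 * The collector above reschedules itself on the power-efficient workqueue
 * every HZ jiffies (about one second) and evicts entries idle for more
 * than NSEC_PER_SEC; a NULL work argument reuses the same walk to evict
 * every entry during wg_ratelimiter_uninit().
 */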
bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
{
        /* We only take the bottom half of the net pointer, so that we can hash
         * 3 words in the end. This way, siphash's len param fits into the final
         * u32, and we don't incur an extra round.
         */
        const u32 net_word = (unsigned long)net;
        struct ratelimiter_entry *entry;
        struct hlist_head *bucket;
        u64 ip;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip = (u64 __force)ip_hdr(skb)->saddr;
                bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
                                   (table_size - 1)];
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                /* Only use 64 bits, so as to ratelimit the whole /64. */
                memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
                bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
                                   (table_size - 1)];
        }
#endif
        else
                return false;
        rcu_read_lock();
        hlist_for_each_entry_rcu(entry, bucket, hash) {
                if (entry->net == net && entry->ip == ip) {
                        u64 now, tokens;
                        bool ret;
                        /* Quasi-inspired by nft_limit.c, but this is actually a
                         * slightly different algorithm. Namely, we incorporate
                         * the burst as part of the maximum tokens, rather than
                         * as part of the rate.
                         */
                        spin_lock(&entry->lock);
                        now = ktime_get_coarse_boottime_ns();
                        tokens = min_t(u64, TOKEN_MAX,
                                       entry->tokens + now -
                                               entry->last_time_ns);
                        entry->last_time_ns = now;
                        ret = tokens >= PACKET_COST;
                        entry->tokens = ret ? tokens - PACKET_COST : tokens;
                        spin_unlock(&entry->lock);
                        rcu_read_unlock();
                        return ret;
                }
        }
        rcu_read_unlock();

        if (atomic_inc_return(&total_entries) > max_entries)
                goto err_oom;

        entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
        if (unlikely(!entry))
                goto err_oom;

        entry->net = net;
        entry->ip = ip;
        INIT_HLIST_NODE(&entry->hash);
        spin_lock_init(&entry->lock);
        entry->last_time_ns = ktime_get_coarse_boottime_ns();
        entry->tokens = TOKEN_MAX - PACKET_COST;
        spin_lock(&table_lock);
        hlist_add_head_rcu(&entry->hash, bucket);
        spin_unlock(&table_lock);
        return true;

err_oom:
        atomic_dec(&total_entries);
        return false;
}
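/*
 * Usage sketch (hypothetical caller; only wg_ratelimiter_allow() and
 * dev_net() are real): per the commit message, cookie.[ch] relies on this
 * to ratelimit certain cryptographic operations, along the lines of:
 *
 *      if (!wg_ratelimiter_allow(skb, dev_net(dev)))
 *              return;  (too busy; make the peer prove itself
 *                        with a cookie reply instead)
 */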
int wg_ratelimiter_init(void)
{
        mutex_lock(&init_lock);
        if (++init_refcnt != 1)
                goto out;

        entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
        if (!entry_cache)
                goto err;

        /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
         * but what it shares in common is that it uses a massive hashtable. So,
         * we borrow their wisdom about good table sizes on different systems
         * dependent on RAM. This calculation here comes from there.
         */
        table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
                max_t(unsigned long, 16, roundup_pow_of_two(
                        (totalram_pages() << PAGE_SHIFT) /
                        (1U << 14) / sizeof(struct hlist_head)));
        max_entries = table_size * 8;

        table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
        if (unlikely(!table_v4))
                goto err_kmemcache;

#if IS_ENABLED(CONFIG_IPV6)
        table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
        if (unlikely(!table_v6)) {
                kvfree(table_v4);
                goto err_kmemcache;
        }
#endif

        queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
        get_random_bytes(&key, sizeof(key));
out:
        mutex_unlock(&init_lock);
        return 0;

err_kmemcache:
        kmem_cache_destroy(entry_cache);
err:
        --init_refcnt;
        mutex_unlock(&init_lock);
        return -ENOMEM;
}
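/*
 * Worked example of the sizing heuristic in wg_ratelimiter_init() above:
 * with more than 1 GiB of RAM, table_size is capped at 8192 buckets, so
 * max_entries = 8192 * 8 = 65536. Below 1 GiB, the bucket count is total
 * RAM in bytes / 2^14 / sizeof(struct hlist_head), rounded up to a power
 * of two, with a floor of 16 buckets (e.g. 2048 buckets at 256 MiB).
 */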
void wg_ratelimiter_uninit(void)
{
        mutex_lock(&init_lock);
        if (!init_refcnt || --init_refcnt)
                goto out;

        cancel_delayed_work_sync(&gc_work);
        wg_ratelimiter_gc_entries(NULL);
        rcu_barrier();
        kvfree(table_v4);
#if IS_ENABLED(CONFIG_IPV6)
        kvfree(table_v6);
#endif
        kmem_cache_destroy(entry_cache);
out:
        mutex_unlock(&init_lock);
}
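/*
 * Runtime unit test for this file, one of the selftests described in the
 * commit message; per that message, the self-tests are enabled via
 * CONFIG_WIREGUARD_DEBUG and otherwise compile away.
 */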
#include "selftest/ratelimiter.c"
|