commit 19952cc4f8
This patch implements per hash bucket locking for the frag queue
hash. This removes two write locks, and the only remaining write
lock is for protecting hash rebuild. This essentially reduces the
readers-writer lock to a rebuild lock.

This patch is part of "net: frag performance followup"
 http://thread.gmane.org/gmane.linux.network/263644
of which two patches have already been accepted.

Same test setup as previous:
 (http://thread.gmane.org/gmane.linux.network/257155)
 Two 10G interfaces, on separate NUMA nodes, are under test and use
 Ethernet flow-control. A third interface is used for generating the
 DoS attack (with trafgen).

Notice, I have changed the frag DoS generator script to be more
efficient/deadly. Before it would only hit one RX queue; now it sends
packets causing multi-queue RX, due to "better" RX hashing.

Test types summary (netperf UDP_STREAM):
 Test-20G64K     == 2x10G with 65K fragments
 Test-20G3F      == 2x10G with 3x fragments (3*1472 bytes)
 Test-20G64K+DoS == Same as 20G64K with frag DoS
 Test-20G3F+DoS  == Same as 20G3F  with frag DoS
 Test-20G64K+MQ  == Same as 20G64K with Multi-Queue frag DoS
 Test-20G3F+MQ   == Same as 20G3F  with Multi-Queue frag DoS

When I rebased this-patch(03) (on top of net-next commit a210576c) and
removed the _bh spinlock, I saw a performance regression. BUT this was
caused by some unrelated change in-between. See tests below.

Test (A) is what I reported before for patch-02, accepted in commit 1b5ab0de.
Test (B) is a verifying retest of commit 1b5ab0de, corresponding to patch-02.
Test (C) is what I reported before for this-patch.
Test (D) is net-next master HEAD (commit a210576c), which reveals some
(unknown) performance regression (compared against test (B)). Test (D)
functions as a new base-test.

Performance table summary (in Mbit/s):

(#) Test-type:  20G64K   20G3F    20G64K+DoS  20G3F+DoS  20G64K+MQ  20G3F+MQ
    ----------  -------  -------  ----------  ---------  ---------  --------
(A) Patch-02  : 18848.7  13230.1  4103.04     5310.36    130.0      440.2
(B) 1b5ab0de  : 18841.5  13156.8  4101.08     5314.57    129.0      424.2
(C) Patch-03v1: 18838.0  13490.5  4405.11     6814.72    196.6      461.6
(D) a210576c  : 18321.5  11250.4  3635.34     5160.13    119.1      405.2
(E) with _bh  : 17247.3  11492.6  3994.74     6405.29    166.7      413.6
(F) without bh: 17471.3  11298.7  3818.05     6102.11    165.7      406.3

Tests (E) and (F) are this-patch(03), with (V1) and without (V2) the
_bh spinlocks. I cannot explain the slowdown for 20G64K (but it is an
artificial "lab-test", so I'm not worried). The other results do show
improvements, and the test (E) "with _bh" version is slightly better.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Eric Dumazet <edumazet@google.com>

----
V2:
- By analysis from Hannes Frederic Sowa and Eric Dumazet, we don't
  need the spinlock _bh versions, as Netfilter currently does a
  local_bh_disable() before entering inet_fragment.
- Fold-in desc from cover-mail
V3:
- Drop the chain_len counter per hash bucket.

Signed-off-by: David S. Miller <davem@davemloft.net>
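To make the locking scheme above concrete, here is a minimal sketch of the lock nesting the patch establishes. The struct mirrors the inet_frag_bucket definition this patch adds to include/net/inet_frag.h; the access pattern is the one used throughout the file that follows. The standalone snippet itself is illustrative, not part of the patch:

    struct inet_frag_bucket {
            struct hlist_head chain;        /* frag queues hashed to this bucket */
            spinlock_t        chain_lock;   /* protects only this chain */
    };

    /* Reader side: the old global rwlock survives only as a rebuild
     * lock, taken shared; chain updates serialize per bucket.
     */
    read_lock(&f->lock);            /* excludes hash rebuild, keeps f->rnd stable */
    hb = &f->hash[f->hashfn(q)];
    spin_lock(&hb->chain_lock);     /* per-bucket, not global */
    /* ... walk or modify hb->chain ... */
    spin_unlock(&hb->chain_lock);
    read_unlock(&f->lock);

Note the plain spin_lock() rather than spin_lock_bh(): as the V2 note explains, callers such as Netfilter already run with bottom halves disabled when entering inet_fragment, so the _bh variants would be redundant.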
365 lines · 8.7 KiB · C
/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *        Authors:  Pavel Emelyanov <xemul@openvz.org>
 *                  Started as consolidation of ipv4/ip_fragment.c,
 *                  ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

static void inet_frag_secret_rebuild(unsigned long dummy)
{
        struct inet_frags *f = (struct inet_frags *)dummy;
        unsigned long now = jiffies;
        int i;

        /* Per bucket lock NOT needed here, due to write lock protection */
        write_lock(&f->lock);

        get_random_bytes(&f->rnd, sizeof(u32));
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = f->hashfn(q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];
                                hlist_add_head(&q->list, &hb_dest->chain);
                        }
                }
        }
        write_unlock(&f->lock);

        mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
        int i;

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }
        rwlock_init(&f->lock);

        f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
                        (jiffies ^ (jiffies >> 6)));

        setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
                        (unsigned long)f);
        f->secret_timer.expires = jiffies + f->secret_interval;
        add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
        nf->nqueues = 0;
        init_frag_mem_limit(nf);
        INIT_LIST_HEAD(&nf->lru_list);
        spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
        del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        nf->low_thresh = 0;

        local_bh_disable();
        inet_frag_evictor(nf, f, true);
        local_bh_enable();

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;
        unsigned int hash;

        read_lock(&f->lock);
        hash = f->hashfn(fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);

        read_unlock(&f->lock);
        inet_frag_lru_del(fq);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
                fq->last_in |= INET_FRAG_COMPLETE;
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                struct sk_buff *skb)
{
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
                        int *work)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
        if (work)
                *work -= sum;
        sub_frag_mem_limit(q, sum);

        if (f->destructor)
                f->destructor(q);
        kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
        struct inet_frag_queue *q;
        int work, evicted = 0;

        if (!force) {
                if (frag_mem_limit(nf) <= nf->high_thresh)
                        return 0;
        }

        work = frag_mem_limit(nf) - nf->low_thresh;
        while (work > 0) {
                spin_lock(&nf->lru_lock);

                if (list_empty(&nf->lru_list)) {
                        spin_unlock(&nf->lru_lock);
                        break;
                }

                q = list_first_entry(&nf->lru_list,
                                struct inet_frag_queue, lru_list);
                atomic_inc(&q->refcnt);
                /* Remove q from list to avoid several CPUs grabbing it */
                list_del_init(&q->lru_list);

                spin_unlock(&nf->lru_lock);

                spin_lock(&q->lock);
                if (!(q->last_in & INET_FRAG_COMPLETE))
                        inet_frag_kill(q, f);
                spin_unlock(&q->lock);

                if (atomic_dec_and_test(&q->refcnt))
                        inet_frag_destroy(q, f, &work);
                evicted++;
        }

        return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                struct inet_frag_queue *qp_in, struct inet_frags *f,
                void *arg)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
#endif
        unsigned int hash;

        read_lock(&f->lock); /* Protects against hash rebuild */
        /*
         * While we stayed w/o the lock, another CPU could update
         * the rnd seed, so we need to re-calculate the hash
         * chain. Fortunately the qp_in can be used to get one.
         */
        hash = f->hashfn(qp_in);
        hb = &f->hash[hash];
        spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
        /* With an SMP race we have to recheck the hash table, because
         * such an entry could have been created on another cpu while
         * we did not hold the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        read_unlock(&f->lock);
                        qp_in->last_in |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);
        spin_unlock(&hb->chain_lock);
        read_unlock(&f->lock);
        inet_frag_lru_add(nf, qp);
        return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                struct inet_frags *f, void *arg)
{
        struct inet_frag_queue *q;

        q = kzalloc(f->qsize, GFP_ATOMIC);
        if (q == NULL)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(q, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                struct inet_frags *f, void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (q == NULL)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        read_unlock(&f->lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);
        read_unlock(&f->lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);
        else
                return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
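One caller-side subtlety worth spelling out: inet_frag_find() is entered with f->lock read-locked and drops it on every return path, which is what the __releases(&f->lock) annotation declares. The sketch below shows how a protocol's lookup helper might drive it, modeled on ip_find() in ipv4/ip_fragment.c of the same era; my_find(), my_hashfn() and the "my_proto: " prefix are hypothetical placeholders, not part of this file:

    /* Hedged sketch of a protocol-side lookup wrapper (hypothetical names). */
    static struct inet_frag_queue *my_find(struct netns_frags *nf,
                                           struct inet_frags *f, void *key)
    {
            struct inet_frag_queue *q;
            unsigned int hash;

            read_lock(&f->lock);            /* keeps f->rnd stable while hashing */
            hash = my_hashfn(key, f->rnd);  /* hypothetical, cf. ipv4's ipqhashfn() */

            q = inet_frag_find(nf, f, key, hash);   /* releases f->lock itself */
            if (IS_ERR_OR_NULL(q)) {
                    inet_frag_maybe_warn_overflow(q, "my_proto: ");
                    return NULL;
            }
            return q;       /* returned with a reference held for the caller */
    }

This asymmetric contract keeps the fast path down to one bucket spinlock while still letting inet_frag_create() re-enter the hash under the same rebuild read lock.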