commit 039930945a
Changing the API xdp_return_frame() to take a struct xdp_frame as argument seems like a natural choice, but there are some subtle performance details here that need extra care, which is a deliberate choice.

De-referencing the xdp_frame on a remote CPU during DMA-TX completion results in the cache-line changing to "Shared" state. Later, when the page is reused for RX, this xdp_frame cache-line is written, which changes the state to "Modified".

This situation already happens (naturally) for virtio_net, tun and cpumap, as the xdp_frame pointer is the queued object. In tun and cpumap, the ptr_ring is used for efficiently transferring cache-lines (with pointers) between CPUs; thus, the only option is to de-reference the xdp_frame.

Only the ixgbe driver had an optimization that avoided de-referencing the xdp_frame. The driver already has a TX-ring queue, which (in case of remote DMA-TX completion) has to be transferred between CPUs anyhow. In this data area, we stored a struct xdp_mem_info and a data pointer, which allowed us to avoid de-referencing the xdp_frame. To compensate, a prefetchw is used to tell the cache coherency protocol about our access pattern. My benchmarks show that this prefetchw is enough to compensate for the change in the ixgbe driver.

V7: Adjust for commit d9314c474d ("i40e: add support for XDP_REDIRECT")
V8: Adjust for commit bd658dda42 ("net/mlx5e: Separate dma base address and offset in dma_sync call")

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

struct xdp_mem_allocator {
        struct xdp_mem_info mem;
        union {
                void *allocator;
                struct page_pool *page_pool;
        };
        struct rhash_head node;
        struct rcu_head rcu;
};
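
/* Lookup table: maps a mem.id (u32, see BUILD_BUG_ON below) back to the
 * allocator pointer.  The cyclic IDs are unique, so the ID is used
 * directly as the hash key.
 */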
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
        const u32 *k = data;
        const u32 key = *k;

        BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
                     != sizeof(u32));

        /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
        return key << RHT_HASH_RESERVED_SPACE;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
                          const void *ptr)
{
        const struct xdp_mem_allocator *xa = ptr;
        u32 mem_id = *(u32 *)arg->key;

        return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
        .nelem_hint = 64,
        .head_offset = offsetof(struct xdp_mem_allocator, node),
        .key_offset = offsetof(struct xdp_mem_allocator, mem.id),
        .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
        .max_size = MEM_ID_MAX,
        .min_size = 8,
        .automatic_shrinking = true,
        .hashfn = xdp_mem_id_hashfn,
        .obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
        struct xdp_mem_allocator *xa;

        xa = container_of(rcu, struct xdp_mem_allocator, rcu);

        /* Allow this ID to be reused */
        ida_simple_remove(&mem_id_pool, xa->mem.id);

        /* Notice, driver is expected to free the *allocator,
         * e.g. page_pool, and MUST also use RCU free.
         */

        /* Poison memory */
        xa->mem.id = 0xFFFF;
        xa->mem.type = 0xF0F0;
        xa->allocator = (void *)0xDEAD9001;

        kfree(xa);
}
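
/* Unregister the memory model for an RX-queue: remove the allocator
 * from the ID lookup table and defer the free via call_rcu(), since
 * xdp_return_frame() performs lockless lookups under rcu_read_lock().
 */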
static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
        struct xdp_mem_allocator *xa;
        int id = xdp_rxq->mem.id;
        int err;

        if (id == 0)
                return;

        mutex_lock(&mem_id_lock);

        xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
        if (!xa) {
                mutex_unlock(&mem_id_lock);
                return;
        }

        err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
        WARN_ON(err);

        call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

        mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
        /* Simplify driver cleanup code paths, allow unreg "unused" */
        if (xdp_rxq->reg_state == REG_STATE_UNUSED)
                return;

        WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

        __xdp_rxq_info_unreg_mem_model(xdp_rxq);

        xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
        xdp_rxq->dev = NULL;

        /* Reset mem info to defaults */
        xdp_rxq->mem.id = 0;
        xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
        memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
                     struct net_device *dev, u32 queue_index)
{
        if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
                WARN(1, "Driver promised not to register this");
                return -EINVAL;
        }

        if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
                WARN(1, "Missing unregister, handled but fix driver");
                xdp_rxq_info_unreg(xdp_rxq);
        }

        if (!dev) {
                WARN(1, "Missing net_device from driver");
                return -ENODEV;
        }

        /* State either UNREGISTERED or NEW */
        xdp_rxq_info_init(xdp_rxq);
        xdp_rxq->dev = dev;
        xdp_rxq->queue_index = queue_index;

        xdp_rxq->reg_state = REG_STATE_REGISTERED;
        return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
        xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
        return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
        struct rhashtable *rht;
        int ret;

        if (unlikely(mem_id_init))
                return 0;

        rht = kzalloc(sizeof(*rht), GFP_KERNEL);
        if (!rht)
                return -ENOMEM;

        ret = rhashtable_init(rht, &mem_id_rht_params);
        if (ret < 0) {
                kfree(rht);
                return ret;
        }
        mem_id_ht = rht;
        smp_mb(); /* mutex lock should provide enough pairing */
        mem_id_init = true;

        return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
        int retries = 1;
        int id;

again:
        id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
        if (id < 0) {
                if (id == -ENOSPC) {
                        /* Cyclic allocator, reset next id */
                        if (retries--) {
                                mem_id_next = MEM_ID_MIN;
                                goto again;
                        }
                }
                return id; /* errno */
        }
        mem_id_next = id + 1;

        return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
        if (type == MEM_TYPE_PAGE_POOL)
                return is_page_pool_compiled_in();

        if (type >= MEM_TYPE_MAX)
                return false;

        return true;
}
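
/* Register which memory model an RX-queue delivers packets from.  For
 * allocator-backed types (MEM_TYPE_PAGE_POOL) an allocator is
 * mandatory; it gets a cyclic ID assigned and is inserted into the
 * ID-to-allocator lookup table.
 */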
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                               enum xdp_mem_type type, void *allocator)
{
        struct xdp_mem_allocator *xdp_alloc;
        gfp_t gfp = GFP_KERNEL;
        int id, errno, ret;
        void *ptr;

        if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
                WARN(1, "Missing register, driver bug");
                return -EFAULT;
        }

        if (!__is_supported_mem_type(type))
                return -EOPNOTSUPP;

        xdp_rxq->mem.type = type;

        if (!allocator) {
                if (type == MEM_TYPE_PAGE_POOL)
                        return -EINVAL; /* Setup time check page_pool req */
                return 0;
        }

        /* Delay init of rhashtable to save memory if feature isn't used */
        if (!mem_id_init) {
                mutex_lock(&mem_id_lock);
                ret = __mem_id_init_hash_table();
                mutex_unlock(&mem_id_lock);
                if (ret < 0) {
                        WARN_ON(1);
                        return ret;
                }
        }

        xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
        if (!xdp_alloc)
                return -ENOMEM;

        mutex_lock(&mem_id_lock);
        id = __mem_id_cyclic_get(gfp);
        if (id < 0) {
                errno = id;
                goto err;
        }
        xdp_rxq->mem.id = id;
        xdp_alloc->mem = xdp_rxq->mem;
        xdp_alloc->allocator = allocator;

        /* Insert allocator into ID lookup table */
        ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
        if (IS_ERR(ptr)) {
                errno = PTR_ERR(ptr);
                goto err;
        }

        mutex_unlock(&mem_id_lock);

        return 0;
err:
        mutex_unlock(&mem_id_lock);
        kfree(xdp_alloc);
        return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
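
/* Return a frame to its originating allocator.  This is the API change:
 * the frame itself carries its xdp_mem_info, so the xdp_frame is
 * de-referenced here (possibly on a remote CPU at DMA-TX completion
 * time) to resolve the memory type and allocator ID.
 */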
void xdp_return_frame(struct xdp_frame *xdpf)
{
        struct xdp_mem_info *mem = &xdpf->mem;
        struct xdp_mem_allocator *xa;
        void *data = xdpf->data;
        struct page *page;

        switch (mem->type) {
        case MEM_TYPE_PAGE_POOL:
                rcu_read_lock();
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
                if (xa)
                        page_pool_put_page(xa->page_pool, page);
                else
                        put_page(page);
                rcu_read_unlock();
                break;
        case MEM_TYPE_PAGE_SHARED:
                page_frag_free(data);
                break;
        case MEM_TYPE_PAGE_ORDER0:
                page = virt_to_page(data); /* Assumes order0 page */
                put_page(page);
                break;
        default:
                /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
                break;
        }
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
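
For completeness, a hedged sketch of the driver-setup side that pairs with the registration API above; the mydrv_* names and ring layout are hypothetical and error handling is abbreviated.

/* Hypothetical driver setup pairing with the registration API above. */
#include <net/page_pool.h>
#include <net/xdp.h>

static int mydrv_setup_rxq(struct mydrv_rx_ring *rx_ring)
{
        int err;

        err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
                               rx_ring->queue_index);
        if (err)
                return err;

        /* Tie this RX-queue to its page_pool; frames later freed via
         * xdp_return_frame() find the pool through the mem.id lookup.
         */
        err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
                                         MEM_TYPE_PAGE_POOL,
                                         rx_ring->page_pool);
        if (err)
                xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        return err;
}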