commit 9610a8dc0a

Merge branch 'tsnep-xdp-socket-zero-copy-support'

Gerhard Engleder says:

====================
tsnep: XDP socket zero-copy support

Implement XDP socket zero-copy support for the tsnep driver. I tried to
follow existing drivers like igc as far as possible. But one main
difference is that tsnep does not need any reconfiguration for XDP BPF
program setup. So I decided to keep this behavior no matter whether an
XSK pool is used or not. As a result, tsnep starts using the XSK pool
even if no XDP BPF program is available.

Another difference is that I tried to prevent potentially failing
allocations during XSK pool setup. E.g., both memory models, for the
page pool and for the XSK pool, are registered all the time. Thus, XSK
pool setup cannot end up with non-working queues.

Some prework is done to reduce the last two XSK commits to the actual
XSK changes.
====================

Link: https://lore.kernel.org/r/20230421194656.48063-1-gerhard@engleder-embedded.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
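For orientation: the header changes below declare a new tsnep_xdp_setup_pool() entry point, which is reached through the driver's .ndo_bpf callback. The following is only a minimal sketch of such a dispatcher, assuming a handler along the lines of the one in tsnep_main.c (whose diff is suppressed further down), not the verbatim driver code:

/* Sketch of an .ndo_bpf dispatcher; illustrative only, the real tsnep
 * handler lives in tsnep_main.c. XDP_SETUP_PROG swaps the BPF program
 * without any queue reconfiguration; XDP_SETUP_XSK_POOL attaches or
 * detaches an XSK pool for one TX/RX queue pair.
 */
static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
					    bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}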
drivers/net/ethernet/engleder/tsnep.h
@@ -18,6 +18,7 @@
 #define TSNEP "tsnep"
 
 #define TSNEP_RING_SIZE 256
+#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
 #define TSNEP_RING_RX_REFILL 16
 #define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4)
 #define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
@@ -69,6 +70,7 @@ struct tsnep_tx_entry {
 	union {
 		struct sk_buff *skb;
 		struct xdp_frame *xdpf;
+		bool zc;
 	};
 	size_t len;
 	DEFINE_DMA_UNMAP_ADDR(dma);
@@ -87,6 +89,7 @@ struct tsnep_tx {
 	int read;
 	u32 owner_counter;
 	int increment_owner_counter;
+	struct xsk_buff_pool *xsk_pool;
 
 	u32 packets;
 	u32 bytes;
@@ -100,7 +103,10 @@ struct tsnep_rx_entry {
 
 	u32 properties;
 
-	struct page *page;
+	union {
+		struct page *page;
+		struct xdp_buff *xdp;
+	};
 	size_t len;
 	dma_addr_t dma;
 };
@@ -120,6 +126,9 @@ struct tsnep_rx {
 	u32 owner_counter;
 	int increment_owner_counter;
 	struct page_pool *page_pool;
+	struct page **page_buffer;
+	struct xsk_buff_pool *xsk_pool;
+	struct xdp_buff **xdp_batch;
 
 	u32 packets;
 	u32 bytes;
@@ -128,6 +137,7 @@ struct tsnep_rx {
 	u32 alloc_failed;
 
 	struct xdp_rxq_info xdp_rxq;
+	struct xdp_rxq_info xdp_rxq_zc;
 };
 
 struct tsnep_queue {
@@ -213,6 +223,8 @@ int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
 
 int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
 			 struct netlink_ext_ack *extack);
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+			 struct xsk_buff_pool *pool, u16 queue_id);
 
 #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
 int tsnep_ethtool_get_test_count(void);
@@ -241,5 +253,7 @@ static inline void tsnep_ethtool_self_test(struct net_device *dev,
 void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool);
+void tsnep_disable_xsk(struct tsnep_queue *queue);
 
 #endif /* _TSNEP_H */
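The new TSNEP_RING_MASK works because TSNEP_RING_SIZE (256) is a power of two, so ring indices can wrap with a bitwise AND instead of a modulo. A minimal sketch of the idiom (the helper name is hypothetical, not taken from the driver):

/* Hypothetical helper illustrating the mask idiom behind TSNEP_RING_MASK;
 * for a power-of-two ring size, (index + 1) & (size - 1) equals
 * (index + 1) % size.
 */
static inline int tsnep_ring_next(int index)
{
	return (index + 1) & TSNEP_RING_MASK;
}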
File diff suppressed because it is too large
drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -17,3 +17,69 @@ int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
 
 	return 0;
 }
+
+static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
+				 struct xsk_buff_pool *pool, u16 queue_id)
+{
+	struct tsnep_queue *queue;
+	int retval;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	queue = &adapter->queue[queue_id];
+	if (queue->rx->queue_index != queue_id ||
+	    queue->tx->queue_index != queue_id) {
+		netdev_err(adapter->netdev,
+			   "XSK support only for TX/RX queue pairs\n");
+
+		return -EOPNOTSUPP;
+	}
+
+	retval = xsk_pool_dma_map(pool, adapter->dmadev,
+				  DMA_ATTR_SKIP_CPU_SYNC);
+	if (retval) {
+		netdev_err(adapter->netdev, "failed to map XSK pool\n");
+
+		return retval;
+	}
+
+	retval = tsnep_enable_xsk(queue, pool);
+	if (retval) {
+		xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+		return retval;
+	}
+
+	return 0;
+}
+
+static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
+{
+	struct xsk_buff_pool *pool;
+	struct tsnep_queue *queue;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+	if (!pool)
+		return -EINVAL;
+
+	queue = &adapter->queue[queue_id];
+
+	tsnep_disable_xsk(queue);
+
+	xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+	return 0;
+}
+
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+			 struct xsk_buff_pool *pool, u16 queue_id)
+{
+	return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
+		      tsnep_xdp_disable_pool(adapter, queue_id);
+}
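These enable/disable paths are exercised when userspace binds or closes an AF_XDP socket on the corresponding queue. A rough userspace sketch using libxdp's xsk helpers follows; the interface name "eth0", queue id 0, and the frame count are illustrative assumptions, and error handling is trimmed:

/* Userspace sketch (libxdp <xdp/xsk.h>; older libbpf exposes the same
 * calls via <bpf/xsk.h>). Creating and binding the AF_XDP socket is what
 * makes the kernel issue XDP_SETUP_XSK_POOL and thus reach
 * tsnep_xdp_setup_pool(); closing it triggers the disable path.
 */
#include <stdlib.h>
#include <unistd.h>
#include <linux/if_xdp.h>
#include <xdp/xsk.h>

#define NUM_FRAMES 4096

int main(void)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_ZEROCOPY,	/* request zero-copy mode */
	};
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	void *buffer;

	if (posix_memalign(&buffer, getpagesize(), size))
		return 1;

	/* UMEM backing memory shared between kernel and userspace */
	if (xsk_umem__create(&umem, buffer, size, &fill, &comp, NULL))
		return 1;

	/* bind to TX/RX queue pair 0 -> driver XSK pool gets enabled */
	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, &cfg))
		return 1;

	/* ... fill ring population and RX/TX processing would go here ... */

	xsk_socket__delete(xsk);	/* driver XSK pool gets disabled */
	xsk_umem__delete(umem);
	free(buffer);
	return 0;
}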