linux/drivers/net/ethernet/intel/ice/ice_xsk.h
Magnus Karlsson 1742b3d528 xsk: i40e: ice: ixgbe: mlx5: Pass buffer pool to driver instead of umem
Replace the explicit umem reference passed to the driver in AF_XDP
zero-copy mode with the buffer pool instead. This is in preparation for
extending the functionality of the zero-copy mode so that umems can be
shared between queues on the same netdev and also between netdevs. In
this commit, only a umem reference has been added to the buffer pool
struct, but later commits will add other entities to it. These are
entities that differ between queue ids and netdevs even though the
umem is shared between them.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-2-git-send-email-magnus.karlsson@intel.com
2020-08-31 21:15:03 +02:00
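
As a concrete (and purely illustrative) sketch of the change the message describes: the driver-facing argument becomes a buffer pool, and the pool carries the umem reference, leaving room for the per-queue-id and per-netdev entities that later commits add. The names below (fake_umem, fake_buff_pool, drv_xsk_setup_*) are invented for this example and are not the kernel's xdp_umem/xsk_buff_pool definitions.

#include <stddef.h>

/* Invented stand-ins for xdp_umem / xsk_buff_pool -- illustration only. */
struct fake_umem {
        void *addrs;                    /* packet memory, shareable */
        size_t size;
};

struct fake_buff_pool {
        struct fake_umem *umem;         /* umem reference held by the pool */
        /* later commits: per-queue-id / per-netdev entities go here */
};

/* Before this commit: the driver setup hook took the umem directly. */
int drv_xsk_setup_umem(struct fake_umem *umem, unsigned int qid)
{
        (void)umem;
        (void)qid;
        return 0;
}

/* After: it takes the buffer pool and reaches the umem through it. */
int drv_xsk_setup_pool(struct fake_buff_pool *pool, unsigned int qid)
{
        struct fake_umem *umem = pool ? pool->umem : NULL;

        (void)umem;
        (void)qid;
        return 0;
}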

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
#include "ice.h"

struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
                       u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
                   struct xsk_buff_pool __always_unused *pool,
                   u16 __always_unused qid)
{
        return -EOPNOTSUPP;
}

static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
                    int __always_unused budget)
{
        return 0;
}

static inline bool
ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
                    int __always_unused budget)
{
        return false;
}

static inline bool
ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
                     u16 __always_unused count)
{
        return false;
}

static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
        return false;
}

static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
               u32 __always_unused queue_id, u32 __always_unused flags)
{
        return -EOPNOTSUPP;
}

#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
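
The #else branch above exists so that code compiled without CONFIG_XDP_SOCKETS never needs its own preprocessor guards: the static inline stubs return -EOPNOTSUPP or a safe default, and the do-while(0) macros make the cleanup calls vanish. Below is a minimal, self-contained sketch of that pattern; CONFIG_FAKE_FEATURE and the fake_* names are invented for the example and are not part of the driver.

#include <errno.h>

#ifdef CONFIG_FAKE_FEATURE
int fake_feature_setup(int qid);        /* real implementation would live in a .c file */
void fake_feature_clean(int qid);
#else
static inline int fake_feature_setup(int qid)
{
        (void)qid;
        return -EOPNOTSUPP;             /* same contract as the ice stubs above */
}
#define fake_feature_clean(qid) do {} while (0)
#endif /* CONFIG_FAKE_FEATURE */

/* Callers compile unchanged whether or not the feature is enabled. */
static inline int fake_caller(void)
{
        return fake_feature_setup(0);
}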