nfp: add control vNIC datapath
Since control vNICs don't have a netdev, they can't use NAPI or the queuing the network stack provides. Add a simple tasklet-based receive and send datapath for control messages, with queuing on an skb list.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5c0dbe9ecf
commit 77ece8d5f1
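Before the diff, a minimal standalone sketch of the pattern the commit describes: a tasklet draining a spinlock-protected skb list whenever the TX ring has room. All my_* names and the stubbed ring hook are hypothetical; only the kernel APIs (the pre-5.9 tasklet interface, which matches this patch's era, and the sk_buff_head helpers) are real.

/* Sketch of the commit's TX pattern, not code from the patch itself.
 * All my_* names are hypothetical illustrations.
 */
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_ctrl_vec {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;	/* ctrl msgs waiting for ring space */
	spinlock_t lock;		/* protects queue and TX ring */
};

/* Hypothetical hardware hook: try to place one skb on the TX ring,
 * returning false when the ring is full.  Stubbed out here.
 */
static bool my_ring_tx(struct sk_buff *skb)
{
	return true;
}

/* Send path: if the ring is full, park the message on the skb list. */
static void my_ctrl_tx(struct my_ctrl_vec *v, struct sk_buff *skb)
{
	spin_lock_bh(&v->lock);
	if (!my_ring_tx(skb))
		__skb_queue_tail(&v->queue, skb);
	spin_unlock_bh(&v->lock);
}

/* Tasklet body, scheduled from the IRQ handler: drain the backlog.
 * On failure the skb is requeued at the head so message order holds.
 */
static void my_ctrl_poll(unsigned long data)
{
	struct my_ctrl_vec *v = (void *)data;
	struct sk_buff *skb;

	spin_lock_bh(&v->lock);
	while ((skb = __skb_dequeue(&v->queue))) {
		if (!my_ring_tx(skb)) {
			__skb_queue_head(&v->queue, skb);
			break;
		}
	}
	spin_unlock_bh(&v->lock);
}

static void my_ctrl_vec_init(struct my_ctrl_vec *v)
{
	__skb_queue_head_init(&v->queue);
	spin_lock_init(&v->lock);
	tasklet_init(&v->tasklet, my_ctrl_poll, (unsigned long)v);
}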
@@ -38,6 +38,7 @@ struct bpf_prog;
 struct net_device;
 struct pci_dev;
 struct tc_to_netdev;
+struct sk_buff;
 struct nfp_app;
 struct nfp_cpp;
 struct nfp_pf;
@@ -55,6 +56,7 @@ extern const struct nfp_app_type app_bpf;
  * struct nfp_app_type - application definition
  * @id:		application ID
  * @name:	application name
+ * @ctrl_has_meta:  control messages have prepend of type:5/port:CTRL
  *
  * Callbacks
  * @init:	perform basic app checks
@@ -69,6 +71,8 @@ struct nfp_app_type {
 	enum nfp_app_id id;
 	const char *name;
 
+	bool ctrl_has_meta;
+
 	int (*init)(struct nfp_app *app);
 
 	const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn);
@@ -99,6 +103,8 @@ struct nfp_app {
 	const struct nfp_app_type *type;
 };
 
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+
 static inline int nfp_app_init(struct nfp_app *app)
 {
 	if (!app->type->init)
@@ -125,6 +131,11 @@ static inline const char *nfp_app_name(struct nfp_app *app)
 	return app->type->name;
 }
 
+static inline bool nfp_app_ctrl_has_meta(struct nfp_app *app)
+{
+	return app->type->ctrl_has_meta;
+}
+
 static inline const char *nfp_app_extra_cap(struct nfp_app *app,
 					    struct nfp_net *nn)
 {
@@ -405,7 +405,14 @@ struct nfp_net_rx_ring {
  */
 struct nfp_net_r_vector {
 	struct nfp_net *nfp_net;
-	struct napi_struct napi;
+	union {
+		struct napi_struct napi;
+		struct {
+			struct tasklet_struct tasklet;
+			struct sk_buff_head queue;
+			struct spinlock lock;
+		};
+	};
 
 	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_rx_ring *rx_ring;
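The union above is safe because a vector is owned by exactly one datapath for its whole lifetime: data vNICs have a netdev and use the NAPI member, while control vNICs have no netdev and use the tasklet/queue/lock members. The NULL check on nn->dp.netdev is the discriminator used throughout the rest of the patch.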
@@ -816,6 +823,11 @@ static inline bool nfp_net_running(struct nfp_net *nn)
 	return nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE;
 }
 
+static inline const char *nfp_net_name(struct nfp_net *nn)
+{
+	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
+}
+
 /* Globals */
 extern const char nfp_driver_version[];
 
@@ -838,6 +850,9 @@ void nfp_net_free(struct nfp_net *nn);
 int nfp_net_init(struct nfp_net *nn);
 void nfp_net_clean(struct nfp_net *nn);
 
+int nfp_ctrl_open(struct nfp_net *nn);
+void nfp_ctrl_close(struct nfp_net *nn);
+
 void nfp_net_set_ethtool_ops(struct net_device *netdev);
 void nfp_net_info(struct nfp_net *nn);
 int nfp_net_reconfig(struct nfp_net *nn, u32 update);
@@ -392,6 +392,15 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
+{
+	struct nfp_net_r_vector *r_vec = data;
+
+	tasklet_schedule(&r_vec->tasklet);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * nfp_net_read_link_status() - Reread link status from control BAR
  * @nn:     NFP Network structure
@@ -523,7 +532,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 
 	entry = &nn->irq_entries[vector_idx];
 
-	snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
+	snprintf(name, name_sz, format, nfp_net_name(nn));
 	err = request_irq(entry->vector, handler, 0, name, nn);
 	if (err) {
 		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
@@ -943,6 +952,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 	r_vec->tx_pkts += done_pkts;
 	u64_stats_update_end(&r_vec->tx_sync);
 
+	if (!dp->netdev)
+		return;
+
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
 	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
 	if (nfp_net_tx_ring_should_wake(tx_ring)) {
@@ -1052,7 +1064,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->qcp_rd_p = 0;
 	tx_ring->wr_ptr_add = 0;
 
-	if (tx_ring->is_xdp)
+	if (tx_ring->is_xdp || !dp->netdev)
 		return;
 
 	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
@@ -1742,6 +1754,231 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 	return pkts_polled;
 }
 
+/* Control device data path
+ */
+
+static bool
+nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+		struct sk_buff *skb, bool old)
+{
+	unsigned int real_len = skb->len, meta_len = 0;
+	struct nfp_net_tx_ring *tx_ring;
+	struct nfp_net_tx_buf *txbuf;
+	struct nfp_net_tx_desc *txd;
+	struct nfp_net_dp *dp;
+	dma_addr_t dma_addr;
+	int wr_idx;
+
+	dp = &r_vec->nfp_net->dp;
+	tx_ring = r_vec->tx_ring;
+
+	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+		goto err_free;
+	}
+
+	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+		u64_stats_update_begin(&r_vec->tx_sync);
+		r_vec->tx_busy++;
+		u64_stats_update_end(&r_vec->tx_sync);
+		if (!old)
+			__skb_queue_tail(&r_vec->queue, skb);
+		else
+			__skb_queue_head(&r_vec->queue, skb);
+		return true;
+	}
+
+	if (nfp_app_ctrl_has_meta(nn->app)) {
+		if (unlikely(skb_headroom(skb) < 8)) {
+			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+			goto err_free;
+		}
+		meta_len = 8;
+		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+	}
+
+	/* Start with the head skbuf */
+	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dp->dev, dma_addr))
+		goto err_dma_warn;
+
+	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+	/* Stash the soft descriptor of the head then initialize it */
+	txbuf = &tx_ring->txbufs[wr_idx];
+	txbuf->skb = skb;
+	txbuf->dma_addr = dma_addr;
+	txbuf->fidx = -1;
+	txbuf->pkt_cnt = 1;
+	txbuf->real_len = real_len;
+
+	/* Build TX descriptor */
+	txd = &tx_ring->txds[wr_idx];
+	txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
+	txd->dma_len = cpu_to_le16(skb_headlen(skb));
+	nfp_desc_set_dma_addr(txd, dma_addr);
+	txd->data_len = cpu_to_le16(skb->len);
+
+	txd->flags = 0;
+	txd->mss = 0;
+	txd->lso_hdrlen = 0;
+
+	tx_ring->wr_p++;
+	tx_ring->wr_ptr_add++;
+	nfp_net_tx_xmit_more_flush(tx_ring);
+
+	return false;
+
+err_dma_warn:
+	nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+	u64_stats_update_begin(&r_vec->tx_sync);
+	r_vec->tx_errors++;
+	u64_stats_update_end(&r_vec->tx_sync);
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+	bool ret;
+
+	spin_lock_bh(&r_vec->lock);
+	ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
+	spin_unlock_bh(&r_vec->lock);
+
+	return ret;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&r_vec->queue)))
+		if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+			return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+	u32 meta_type, meta_tag;
+
+	if (!nfp_app_ctrl_has_meta(nn->app))
+		return !meta_len;
+
+	if (meta_len != 8)
+		return false;
+
+	meta_type = get_unaligned_be32(data);
+	meta_tag = get_unaligned_be32(data + 4);
+
+	return (meta_type == NFP_NET_META_PORTID &&
+		meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+	struct nfp_net_rx_buf *rxbuf;
+	struct nfp_net_rx_desc *rxd;
+	dma_addr_t new_dma_addr;
+	struct sk_buff *skb;
+	void *new_frag;
+	int idx;
+
+	idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+	rxd = &rx_ring->rxds[idx];
+	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+		return false;
+
+	/* Memory barrier to ensure that we won't do other reads
+	 * before the DD bit.
+	 */
+	dma_rmb();
+
+	rx_ring->rd_p++;
+
+	rxbuf = &rx_ring->rxbufs[idx];
+	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+	data_len = le16_to_cpu(rxd->rxd.data_len);
+	pkt_len = data_len - meta_len;
+
+	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+		pkt_off += meta_len;
+	else
+		pkt_off += dp->rx_offset;
+	meta_off = pkt_off - meta_len;
+
+	/* Stats update */
+	u64_stats_update_begin(&r_vec->rx_sync);
+	r_vec->rx_pkts++;
+	r_vec->rx_bytes += pkt_len;
+	u64_stats_update_end(&r_vec->rx_sync);
+
+	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+			   meta_len);
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+
+	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+	if (unlikely(!skb)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+		return true;
+	}
+	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
+	if (unlikely(!new_frag)) {
+		nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+		return true;
+	}
+
+	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+	nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+	skb_reserve(skb, pkt_off);
+	skb_put(skb, pkt_len);
+
+	dev_kfree_skb_any(skb);
+
+	return true;
+}
+
+static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+	struct nfp_net *nn = r_vec->nfp_net;
+	struct nfp_net_dp *dp = &nn->dp;
+
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+		continue;
+}
+
+static void nfp_ctrl_poll(unsigned long arg)
+{
+	struct nfp_net_r_vector *r_vec = (void *)arg;
+
+	spin_lock_bh(&r_vec->lock);
+	nfp_net_tx_complete(r_vec->tx_ring);
+	__nfp_ctrl_tx_queued(r_vec);
+	spin_unlock_bh(&r_vec->lock);
+
+	nfp_ctrl_rx(r_vec);
+
+	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+}
+
 /* Setup and Configuration
  */
 
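Note that nfp_ctrl_rx_one() builds the skb, validates the metadata, and then simply frees it with dev_kfree_skb_any(): this patch adds only the datapath plumbing, and delivery of received control messages to the app layer is presumably left to a follow-up change. Note also the bool return convention: the TX helper returns true when the message was parked on the queue for a later retry, while the RX helper returns true for as long as descriptors are still being consumed.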
@@ -1764,10 +2001,21 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
 
 		r_vec = &nn->r_vecs[r];
 		r_vec->nfp_net = nn;
-		r_vec->handler = nfp_net_irq_rxtx;
 		r_vec->irq_entry = entry->entry;
 		r_vec->irq_vector = entry->vector;
 
+		if (nn->dp.netdev) {
+			r_vec->handler = nfp_net_irq_rxtx;
+		} else {
+			r_vec->handler = nfp_ctrl_irq_rxtx;
+
+			__skb_queue_head_init(&r_vec->queue);
+			spin_lock_init(&r_vec->lock);
+			tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
+				     (unsigned long)r_vec);
+			tasklet_disable(&r_vec->tasklet);
+		}
+
 		cpumask_set_cpu(r, &r_vec->affinity_mask);
 	}
 }
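The tasklet lifecycle mirrors NAPI's: it is initialized and immediately disabled here in nfp_net_vecs_init(), enabled in nfp_net_prepare_vector() (below) at the point where a netdev vNIC would call netif_napi_add(), and disabled again on the error path, in nfp_net_cleanup_vector(), and in nfp_ctrl_close().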
@@ -2034,15 +2282,22 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	int err;
 
 	/* Setup NAPI */
-	netif_napi_add(nn->dp.netdev, &r_vec->napi,
-		       nfp_net_poll, NAPI_POLL_WEIGHT);
+	if (nn->dp.netdev)
+		netif_napi_add(nn->dp.netdev, &r_vec->napi,
+			       nfp_net_poll, NAPI_POLL_WEIGHT);
+	else
+		tasklet_enable(&r_vec->tasklet);
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
-		 "%s-rxtx-%d", nn->dp.netdev->name, idx);
+		 "%s-rxtx-%d", nfp_net_name(nn), idx);
 	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
 			  r_vec);
 	if (err) {
-		netif_napi_del(&r_vec->napi);
+		if (nn->dp.netdev)
+			netif_napi_del(&r_vec->napi);
+		else
+			tasklet_disable(&r_vec->tasklet);
+
 		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
 		return err;
 	}
@@ -2060,7 +2315,11 @@ static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
 	irq_set_affinity_hint(r_vec->irq_vector, NULL);
-	netif_napi_del(&r_vec->napi);
+	if (nn->dp.netdev)
+		netif_napi_del(&r_vec->napi);
+	else
+		tasklet_disable(&r_vec->tasklet);
+
 	free_irq(r_vec->irq_vector, r_vec);
 }
 
@@ -2338,6 +2597,24 @@ static int nfp_net_netdev_close(struct net_device *netdev)
 	return 0;
 }
 
+void nfp_ctrl_close(struct nfp_net *nn)
+{
+	int r;
+
+	rtnl_lock();
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++) {
+		disable_irq(nn->r_vecs[r].irq_vector);
+		tasklet_disable(&nn->r_vecs[r].tasklet);
+	}
+
+	nfp_net_clear_config_and_disable(nn);
+
+	nfp_net_close_free_all(nn);
+
+	rtnl_unlock();
+}
+
 /**
  * nfp_net_open_stack() - Start the device from stack's perspective
  * @nn:      NFP Net device to reconfigure
@@ -2453,6 +2730,35 @@ err_free_all:
 	return err;
 }
 
+int nfp_ctrl_open(struct nfp_net *nn)
+{
+	int err, r;
+
+	/* ring dumping depends on vNICs being opened/closed under rtnl */
+	rtnl_lock();
+
+	err = nfp_net_open_alloc_all(nn);
+	if (err)
+		goto err_unlock;
+
+	err = nfp_net_set_config_and_enable(nn);
+	if (err)
+		goto err_free_all;
+
+	for (r = 0; r < nn->dp.num_r_vecs; r++)
+		enable_irq(nn->r_vecs[r].irq_vector);
+
+	rtnl_unlock();
+
+	return 0;
+
+err_free_all:
+	nfp_net_close_free_all(nn);
+err_unlock:
+	rtnl_unlock();
+	return err;
+}
+
 static void nfp_net_set_rx_mode(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
@@ -3278,6 +3584,7 @@ int nfp_net_init(struct nfp_net *nn)
 
 	/* Chained metadata is signalled by capabilities except in version 4 */
 	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+					 !nn->dp.netdev ||
 					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
 	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
 		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
@@ -71,8 +71,11 @@
 #define NFP_NET_META_FIELD_SIZE	4
 #define NFP_NET_META_HASH	1 /* next field carries hash type */
 #define NFP_NET_META_MARK	2
+#define NFP_NET_META_PORTID	5
 #define NFP_NET_META_CSUM	6 /* checksum complete type */
 
+#define NFP_META_PORT_ID_CTRL	~0U
+
 /**
  * Hash type pre-pended when a RSS hash was computed
  */
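The 8-byte prepend on control messages is two big-endian 32-bit words: a metadata type word carrying NFP_NET_META_PORTID, followed by the port ID word NFP_META_PORT_ID_CTRL (~0U). Because the TX path pushes the port ID first and the type second, the type word ends up first in memory, which is exactly the layout nfp_ctrl_meta_ok() checks. A small standalone model of that layout, assuming a plain byte buffer instead of an skb:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl()/ntohl() for the big-endian words */

#define NFP_NET_META_PORTID	5
#define NFP_META_PORT_ID_CTRL	(~0U)

/* Build the prepend: type word first, port ID word second. */
static void ctrl_meta_build(uint8_t meta[8])
{
	uint32_t v = htonl(NFP_NET_META_PORTID);

	memcpy(meta, &v, 4);
	v = htonl(NFP_META_PORT_ID_CTRL);
	memcpy(meta + 4, &v, 4);
}

/* Validate the prepend, mirroring nfp_ctrl_meta_ok() in the diff above. */
static int ctrl_meta_ok(const uint8_t *meta, unsigned int meta_len)
{
	uint32_t type, tag;

	if (meta_len != 8)
		return 0;
	memcpy(&type, meta, 4);
	memcpy(&tag, meta + 4, 4);
	return ntohl(type) == NFP_NET_META_PORTID &&
	       ntohl(tag) == NFP_META_PORT_ID_CTRL;
}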