mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-27 21:14:44 +08:00
9b8dd5e5ea
The RX queue has an array of `gve_rx_buf_state_dqo` objects. All allocated pages have an associated buf_state object. When a buffer is posted on the RX buffer queue, the buffer ID will be the buf_state's index into the RX queue's array. On packet reception, the RX queue will have one descriptor for each buffer associated with a received packet. Each RX descriptor will have a buffer_id that was posted on the buffer queue. Notable mentions: - We use a default buffer size of 2048 bytes. Based on page size, we may post separate sections of a single page as separate buffers. - The driver holds an extra reference on pages passed up the receive path with an skb and keeps these pages on a list. When posting new buffers to the NIC, we check if any of these pages has only our reference, or another buffer sized segment of the page has no references. If so, it is free to reuse. This page recycling approach is a common netdev optimization that reduces page alloc/free calls. - Pages in the free list have a page_count bias in order to avoid an atomic increment of pagecount every time we attempt to reuse a page. # references = page_count() - bias - In order to track when a page is safe to reuse, we keep track of the last offset which had a single SKB reference. When this occurs, it implies that every single other offset is reusable. Otherwise, we don't know if offsets can be safely reused. - We maintain two free lists of pages. List #1 (recycled_buf_states) contains pages we know can be reused right away. List #2 (used_buf_states) contains pages which cannot be used right away. We only attempt to get pages from list #2 when list #1 is empty. We only attempt to use a small fixed number of pages from list #2 before giving up and allocating a new page. Both lists are FIFOs in the hope that by the time we attempt to reuse a page, the references were dropped.
Signed-off-by: Bailey Forrest <bcf@google.com> Reviewed-by: Willem de Bruijn <willemb@google.com> Reviewed-by: Catherine Sullivan <csully@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
82 lines
1.9 KiB
C
82 lines
1.9 KiB
C
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

|
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
|
|
{
|
|
struct gve_notify_block *block =
|
|
&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
|
|
|
|
block->tx = NULL;
|
|
}
|
|
|
|
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
|
|
{
|
|
int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
|
|
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
|
|
struct gve_tx_ring *tx = &priv->tx[queue_idx];
|
|
|
|
block->tx = tx;
|
|
tx->ntfy_id = ntfy_idx;
|
|
}
|
|
|
|
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
|
|
{
|
|
struct gve_notify_block *block =
|
|
&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
|
|
|
|
block->rx = NULL;
|
|
}
|
|
|
|
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
|
|
{
|
|
u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
|
|
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
|
|
struct gve_rx_ring *rx = &priv->rx[queue_idx];
|
|
|
|
block->rx = rx;
|
|
rx->ntfy_id = ntfy_idx;
|
|
}
|
|
|
|
/* Build a linear skb by copying @len bytes out of the RX buffer described
 * by @page_info, skipping @pad bytes at the start of the fragment.
 *
 * Returns the skb with its protocol field set, or NULL if skb allocation
 * failed.
 */
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len,
			    u16 pad)
{
	struct sk_buff *skb;
	void *va;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	/* Source is the posted buffer segment plus the pad offset. */
	va = page_info->page_address + page_info->page_offset + pad;

	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, va, len);
	skb->protocol = eth_type_trans(skb, dev);

	return skb;
}
|
|
|
|
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
|
|
{
|
|
page_info->pagecnt_bias--;
|
|
if (page_info->pagecnt_bias == 0) {
|
|
int pagecount = page_count(page_info->page);
|
|
|
|
/* If we have run out of bias - set it back up to INT_MAX
|
|
* minus the existing refs.
|
|
*/
|
|
page_info->pagecnt_bias = INT_MAX - pagecount;
|
|
|
|
/* Set pagecount back up to max. */
|
|
page_ref_add(page_info->page, INT_MAX - pagecount);
|
|
}
|
|
}
|