[gve] Increase number of receive buffers to reduce packet loss

Experiments suggest that using fewer than 64 receive buffers leads to
excessive packet drop rates on some instance types (observed with a
c3-standard-4 instance in europe-west4-a).

Fix by increasing the number of receive data buffers (and adjusting
the length of the registrable queue page address list to match).

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown 2024-07-25 00:10:38 +01:00
parent c7b76e3adc
commit d2d194bc60
2 changed files with 13 additions and 5 deletions
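
As a quick cross-check of the numbers in this commit, the queue page list (QPL) sizing arithmetic can be sketched as a standalone program. The 4096-byte page size and 2048-byte receive data buffer size used below are assumptions for illustration (neither constant appears in this diff); the rounding expression mirrors the one in gve_alloc_qpl() below. With two buffers per page, 64 receive buffers need 32 pages, which is why GVE_QPL_MAX grows alongside GVE_RX_FILL.

/* Sketch only: QPL sizing arithmetic behind this commit.  The page
 * and buffer sizes are assumed values, not taken from this diff.
 */
#include <stdio.h>

#define GVE_PAGE_SIZE 4096	/* assumed page size */
#define GVE_BUF_SIZE 2048	/* assumed receive data buffer size */
#define GVE_BUF_PER_PAGE ( GVE_PAGE_SIZE / GVE_BUF_SIZE )
#define GVE_RX_FILL 64		/* new value from this commit */
#define GVE_QPL_MAX 32		/* new value from this commit */

int main ( void ) {
	/* Pages required to hold GVE_RX_FILL buffers, rounded up */
	unsigned int count =
		( ( GVE_RX_FILL + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE );

	printf ( "%d buffers of %d bytes require %u pages (max %d)\n",
		 GVE_RX_FILL, GVE_BUF_SIZE, count, GVE_QPL_MAX );
	return ( ( count <= GVE_QPL_MAX ) ? 0 : 1 );
}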

src/drivers/net/gve.c

@@ -722,6 +722,7 @@ static int gve_alloc_qpl ( struct gve_nic *gve, struct gve_qpl *qpl,
 	/* Calculate number of pages required */
 	build_assert ( GVE_BUF_SIZE <= GVE_PAGE_SIZE );
 	qpl->count = ( ( buffers + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE );
+	assert ( qpl->count <= GVE_QPL_MAX );
 
 	/* Allocate pages (as a single block) */
 	len = ( qpl->count * GVE_PAGE_SIZE );

src/drivers/net/gve.h

@@ -59,9 +59,6 @@ struct google_mac {
  */
 #define GVE_LEN_ALIGN 64
 
-/** Maximum number of pages per queue (must be a power of two) */
-#define GVE_QPL_MAX 16
-
 /** Configuration BAR */
 #define GVE_CFG_BAR PCI_BASE_ADDRESS_0
 
@@ -208,6 +205,14 @@ struct gve_admin_register {
 	uint64_t size;
 } __attribute__ (( packed ));
 
+/**
+ * Maximum number of pages per queue
+ *
+ * This is a policy decision. Must be sufficient to allow for both
+ * the transmit and receive queue fill levels.
+ */
+#define GVE_QPL_MAX 32
+
 /** Page list */
 struct gve_pages {
 	/** Page address */
@@ -538,9 +543,11 @@ struct gve_tx_descriptor {
 /**
  * Maximum number of receive buffers
  *
- * This is a policy decision.
+ * This is a policy decision. Experiments suggest that using fewer
+ * than 64 receive buffers leads to excessive packet drop rates on
+ * some instance types.
  */
-#define GVE_RX_FILL 16
+#define GVE_RX_FILL 64
 
 /** Receive queue page list ID */
 #define GVE_RX_QPL 0x18ae5258
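
The new comment states the sizing requirement only in prose. As an illustrative sketch (not part of this commit), the same relationship could be pinned down with a standard C11 static assertion, reusing the rounding expression from gve_alloc_qpl(); the GVE_BUF_PER_PAGE value of 2 is an assumption, as above.

/* Illustrative only, not part of this commit: a compile-time
 * statement of the requirement that GVE_QPL_MAX covers the receive
 * queue fill level.
 */
#define GVE_QPL_MAX 32
#define GVE_RX_FILL 64
#define GVE_BUF_PER_PAGE 2	/* assumed: 2048-byte buffers in 4096-byte pages */

_Static_assert ( ( ( GVE_RX_FILL + GVE_BUF_PER_PAGE - 1 ) /
		   GVE_BUF_PER_PAGE ) <= GVE_QPL_MAX,
		 "GVE_QPL_MAX too small for GVE_RX_FILL" );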