gve: add support to read ring size ranges from the device
Add support to read the ring size change capability and the min and max
descriptor counts from the device, and store them in the driver. Also
accommodate a special case where the device does not provide a minimum
ring size, depending on the device version; in that case, rely on default
values for the minimums.

Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit ed4fb32694
parent b94d3703c1
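For context, the patch extends the gve admin-queue "device option" parser. Each option is a small TLV: a fixed header carrying the option id, payload length, and required-features mask, with the payload immediately after the header, which is why the parser below assigns (void *)(option + 1). A minimal sketch with simplified stand-in types (illustration only, not the driver's definitions):

#include <stdint.h>

struct option_hdr {                     /* stands in for struct gve_device_option */
	uint16_t option_id;             /* e.g. GVE_DEV_OPT_ID_MODIFY_RING (0x6) */
	uint16_t option_length;         /* length of the payload in bytes */
	uint32_t required_features_mask;
};

/* The option payload begins immediately after the fixed header. */
static inline const void *option_payload(const struct option_hdr *opt)
{
	return (const void *)(opt + 1);
}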
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -50,6 +50,10 @@
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES 1024
 
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
 
 #define GVE_MAX_RX_BUFFER_SIZE 4096
@@ -712,6 +716,12 @@ struct gve_priv {
 	u16 num_event_counters;
 	u16 tx_desc_cnt; /* num desc per ring */
 	u16 rx_desc_cnt; /* num desc per ring */
+	u16 max_tx_desc_cnt;
+	u16 max_rx_desc_cnt;
+	u16 min_tx_desc_cnt;
+	u16 min_rx_desc_cnt;
+	bool modify_ring_size_enabled;
+	bool default_min_ring_size;
 	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
 	u64 max_registered_pages;
 	u64 num_registered_pages; /* num pages registered with NIC */
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
 	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
 }
 
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
 static
 void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,8 @@ void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
 			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
 			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
-			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+			     struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
 	u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +168,27 @@ void gve_parse_device_option(struct gve_priv *priv,
 			       "Buffer Sizes");
 		*dev_op_buffer_sizes = (void *)(option + 1);
 		break;
+	case GVE_DEV_OPT_ID_MODIFY_RING:
+		if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_modify_ring)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+		}
+
+		*dev_op_modify_ring = (void *)(option + 1);
+
+		/* device has not provided min ring size */
+		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+			priv->default_min_ring_size = true;
+		break;
 	default:
 		/* If we don't recognize the option just continue
 		 * without doing anything.
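Why 8 bytes: the full Modify Ring payload defined in gve_adminq.h below is 12 bytes (a 4-byte feature mask followed by four 2-byte ring sizes), so an older device that omits the two minimums sends exactly the first 8 bytes. A stand-alone check of that arithmetic, using a stand-in struct whose plain integer types mirror the sizes of the __be fields:

#include <stddef.h>
#include <stdint.h>

/* Stand-in mirroring struct gve_device_option_modify_ring. */
struct modify_ring_opt {
	uint32_t supported_featured_mask;
	uint16_t max_rx_ring_size;
	uint16_t max_tx_ring_size;
	uint16_t min_rx_ring_size;   /* absent on older devices */
	uint16_t min_tx_ring_size;   /* absent on older devices */
};

/* The minimums start at byte 8, so an 8-byte option carries maximums only. */
_Static_assert(offsetof(struct modify_ring_opt, min_rx_ring_size) == 8,
	       "GVE_DEVICE_OPTION_NO_MIN_RING_SIZE covers the fields before the mins");
_Static_assert(sizeof(struct modify_ring_opt) == 12, "full payload is 12 bytes");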
@@ -183,7 +207,8 @@ gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
 			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
 			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
-			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+			   struct gve_device_option_modify_ring **dev_op_modify_ring)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
 	struct gve_device_option *dev_opt;
@@ -204,7 +229,8 @@ gve_process_device_options(struct gve_priv *priv,
 		gve_parse_device_option(priv, descriptor, dev_opt,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
 					dev_op_dqo_rda, dev_op_jumbo_frames,
-					dev_op_dqo_qpl, dev_op_buffer_sizes);
+					dev_op_dqo_qpl, dev_op_buffer_sizes,
+					dev_op_modify_ring);
 		dev_opt = next_opt;
 	}
 
@@ -738,6 +764,12 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
 {
 	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
 	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+
+	/* set default ranges */
+	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+	priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+	priv->min_rx_desc_cnt = priv->rx_desc_cnt;
 }
 
 static void gve_enable_supported_features(struct gve_priv *priv,
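gve_set_default_desc_cnt() now seeds the whole range from the descriptor's queue counts, so a device that never offers the Modify Ring option is left with a degenerate range (min == cnt == max) and resizing simply stays unavailable. The effect, distilled into a hypothetical helper (names illustrative):

#include <stdint.h>

struct ring_limits {
	uint16_t cnt;
	uint16_t min;
	uint16_t max;
};

/* The descriptor's count is both the operating point and, until a
 * Modify Ring option widens it, the entire permitted range. */
static void seed_ring_limits(struct ring_limits *r, uint16_t desc_cnt)
{
	r->cnt = desc_cnt;
	r->min = desc_cnt;
	r->max = desc_cnt;
}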
@@ -747,7 +779,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 					  const struct gve_device_option_dqo_qpl
 					  *dev_op_dqo_qpl,
 					  const struct gve_device_option_buffer_sizes
-					  *dev_op_buffer_sizes)
+					  *dev_op_buffer_sizes,
+					  const struct gve_device_option_modify_ring
+					  *dev_op_modify_ring)
 {
 	/* Before control reaches this point, the page-size-capped max MTU from
 	 * the gve_device_descriptor field has already been stored in
@@ -778,12 +812,33 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
 			 priv->max_rx_buffer_size, priv->header_buf_size);
 	}
+
+	/* Read and store ring size ranges given by device */
+	if (dev_op_modify_ring &&
+	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+		priv->modify_ring_size_enabled = true;
+
+		/* max ring size for DQO QPL should not be overwritten because of device limit */
+		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+		}
+		if (priv->default_min_ring_size) {
+			/* If device hasn't provided minimums, use default minimums */
+			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+		} else {
+			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+		}
+	}
 }
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
 	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
 	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
 	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
 	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
 	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
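This commit only records the range; the resize path itself is not added here. A hypothetical consumer (for example, an ethtool set_ringparam handler) would validate a requested count against the stored bounds, roughly:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical validation against the stored range (names illustrative). */
static bool ring_size_in_range(uint16_t new_cnt, uint16_t min_cnt, uint16_t max_cnt)
{
	return new_cnt >= min_cnt && new_cnt <= max_cnt;
}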
@@ -815,9 +870,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 
 	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
 					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
-					 &dev_op_jumbo_frames,
-					 &dev_op_dqo_qpl,
-					 &dev_op_buffer_sizes);
+					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+					 &dev_op_buffer_sizes,
+					 &dev_op_modify_ring);
 	if (err)
 		goto free_device_descriptor;
 
@@ -878,7 +933,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 
 	gve_enable_supported_features(priv, supported_features_mask,
 				      dev_op_jumbo_frames, dev_op_dqo_qpl,
-				      dev_op_buffer_sizes);
+				      dev_op_buffer_sizes, dev_op_modify_ring);
 
 free_device_descriptor:
 	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -133,6 +133,16 @@ struct gve_device_option_buffer_sizes {
 
 static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
 
+struct gve_device_option_modify_ring {
+	__be32 supported_featured_mask;
+	__be16 max_rx_ring_size;
+	__be16 max_tx_ring_size;
+	__be16 min_rx_ring_size;
+	__be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
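The option's fields arrive wire big-endian (__be16/__be32), which is why the driver converts with be16_to_cpu() before storing them. A host-side equivalent decode of the 12-byte payload, for illustration only (offsets follow the struct above):

#include <stdint.h>

static uint16_t be16_load(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Pull the four ring-size bounds out of a raw 12-byte Modify Ring payload. */
static void parse_modify_ring_payload(const uint8_t *payload,
				      uint16_t *max_rx, uint16_t *max_tx,
				      uint16_t *min_rx, uint16_t *min_tx)
{
	*max_rx = be16_load(payload + 4);   /* after the 4-byte feature mask */
	*max_tx = be16_load(payload + 6);
	*min_rx = be16_load(payload + 8);
	*min_tx = be16_load(payload + 10);
}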
@@ -146,6 +156,7 @@ enum gve_dev_opt_id {
 	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
 	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
 	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+	GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
 	GVE_DEV_OPT_ID_DQO_QPL = 0x7,
 	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
 	GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
@@ -159,9 +170,11 @@ enum gve_dev_opt_req_feat_mask {
 	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
 };
 
 enum gve_sup_feature_mask {
+	GVE_SUP_MODIFY_RING_MASK = 1 << 0,
 	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
 	GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
 };
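The supported-features word is a plain bitmask, so the gating seen in gve_enable_supported_features() above reduces to a single AND. A stand-alone sketch (the mask value mirrors GVE_SUP_MODIFY_RING_MASK):

#include <stdbool.h>
#include <stdint.h>

#define SUP_MODIFY_RING_MASK (1u << 0)  /* mirrors GVE_SUP_MODIFY_RING_MASK */

static bool modify_ring_supported(uint32_t supported_features_mask)
{
	return (supported_features_mask & SUP_MODIFY_RING_MASK) != 0;
}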