ice: Get switch config, scheduler config and device capabilities
This patch adds to the initialization flow by getting switch
configuration, scheduler configuration and device capabilities.
Switch configuration:
On boot, an L2 switch element is created in the firmware per physical
function. Each physical function is also mapped to a port, to which its
switch element is connected. In other words, this switch can be visualized
as an embedded vSwitch that can connect a physical function's virtual
station interfaces (VSIs) to the egress/ingress port. Egress/ingress
filters will eventually be created and applied on this switch element.
As part of the initialization flow, the driver gets configuration data
from this switch element and stores it.
Scheduler configuration:
The Tx scheduler is a subsystem responsible for setting and enforcing QoS.
As part of the initialization flow, the driver queries and stores the
default scheduler configuration for the given physical function.
Device capabilities:
As part of initialization, the driver has to determine what the device is
capable of (e.g., max queues, VSIs, etc.). This information is obtained from
the firmware and stored by the driver.
CC: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@oracle.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
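
A minimal sketch of what this amounts to in the driver's init path (illustrative
only; error handling is omitted, the exact call order in ice_init_hw() may
differ, and the helpers named here, ice_get_initial_sw_cfg(),
ice_sched_query_res_alloc(), ice_sched_init_port() and ice_get_caps(), live
outside this file):

	status = ice_get_initial_sw_cfg(hw);		/* switch configuration */
	status = ice_sched_query_res_alloc(hw);		/* scheduler resources */
	status = ice_sched_init_port(hw->port_info);	/* default Tx topology */
	status = ice_get_caps(hw);			/* device capabilities */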

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"
/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
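
/* Illustrative usage sketch (not part of the upstream file): lookups are done
 * with the scheduler lock held, roughly
 *
 *	mutex_lock(&pi->sched_lock);
 *	node = ice_sched_find_node_by_teid(pi->root, teid);
 *	mutex_unlock(&pi->sched_lock);
 *
 * where 'pi' and 'teid' are assumed to come from the caller's context.
 */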

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}
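
/* Illustrative usage sketch (not part of the upstream file): querying a single
 * element, as callers such as ice_sched_query_elem() do, looks roughly like
 *
 *	struct ice_aqc_txsched_elem_data elem = { 0 };
 *	u16 num_ret = 0;
 *
 *	elem.node_teid = cpu_to_le32(node_teid);
 *	status = ice_aq_query_sched_elems(hw, 1, &elem, sizeof(elem),
 *					  &num_ret, NULL);
 *
 * followed by a check that 'num_ret' is 1; 'node_teid' is assumed to identify
 * the element of interest.
 */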

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = struct_size(buf, teid, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
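
/* Illustrative usage sketch (not part of the upstream file): a caller would
 * typically suspend a node, reconfigure it, and then resume it, roughly
 *
 *	u32 teid = le32_to_cpu(node->info.node_teid);
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 *	... reconfigure the element ...
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
 *
 * where 'node' is assumed to be a valid SW DB scheduler node.
 */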

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						      new_numqs,
						      sizeof(*q_ctx),
						      GFP_KERNEL);
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = cpu_to_le16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = le16_to_cpu(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}
|
|
|
|
|
|
|
|
/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

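/* Example (illustrative sketch): adding a single RL profile element and
 * checking that firmware processed it. The element contents are assumed to
 * have been filled in by the caller beforehand.
 *
 *	struct ice_aqc_rl_profile_elem elem = { 0 };
 *	u16 num_added = 0;
 *	enum ice_status status;
 *
 *	status = ice_aq_add_rl_profile(hw, 1, &elem, sizeof(elem),
 *				       &num_added, NULL);
 *	if (status || num_added != 1)
 *		return ICE_ERR_CFG;
 *
 * ice_sched_del_rl_profile() below follows the same pattern for the remove
 * opcode (0x0415).
 */
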
/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, this function removes the
 * profile ID and its associated parameters from the HW DB, and locally.
 * The caller needs to hold the scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now */
	list_del(&rl_info->list_entry);
	devm_kfree(ice_hw_to_dev(hw), rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from the SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			struct ice_hw *hw = pi->hw;
			enum ice_status status;

			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED,
					  "Remove rl profile failed\n");
				/* On error, free mem required */
				list_del(&rl_prof_elem->list_entry);
				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up the aggregator
 * related memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

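/* Example (illustrative sketch): a teardown path clears the port before the
 * HW struct goes away, e.g.
 *
 *	ice_sched_clear_port(hw->port_info);
 *
 * The call is a no-op unless the port reached ICE_SCHED_PORT_STATE_READY,
 * and it destroys pi->sched_lock, so it must not race with other users of
 * the scheduler tree.
 */
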
/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

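/* Note on the buffer sizing above: struct_size(buf, generic, num_nodes)
 * returns sizeof(*buf) plus room for num_nodes trailing "generic" elements,
 * with overflow checking. A minimal stand-alone illustration of the same
 * idiom (struct and variable names here are hypothetical, not from this
 * driver):
 *
 *	struct foo {
 *		struct foo_hdr hdr;
 *		struct foo_elem generic[];
 *	} *p;
 *
 *	p = devm_kzalloc(dev, struct_size(p, generic, n), GFP_KERNEL);
 */
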
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 / 8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

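/* Example (illustrative sketch): adding eight queue-group nodes under a VSI
 * at the queue-group layer, assuming the caller already looked up tc_node,
 * parent and qgrp_layer:
 *
 *	enum ice_status status;
 *	u32 first_teid;
 *	u16 added = 0;
 *
 *	status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, qgrp_layer,
 *					      8, &first_teid, &added);
 *	if (status || added != 8)
 *		return ICE_ERR_CFG;
 *
 * If the parent already holds hw->max_children[] nodes for that layer, the
 * function recurses to the parent's sibling as described in the comments
 * above (at most 16 such iterations for 1024 queues with a fan-out of 8).
 */
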
/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}

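/* Worked example of the layer math above, assuming ICE_VSI_LAYER_OFFSET is 3
 * (the offset value is an assumption for illustration; it is defined
 * elsewhere): with the common 9-layer tree the VSI layer is 9 - 3 = 6, with a
 * 7-layer tree it is 7 - 3 = 4, and with 5 or fewer layers the function falls
 * back to hw->sw_entry_point_layer, matching the table in the comment.
 */
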
/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources and the default topology created by firmware, and it stores that
 * information in the SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

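/* Example (illustrative sketch of an assumed init ordering, not a quote of
 * the actual probe path): the scheduler resources are queried before the
 * port's default tree is built, roughly
 *
 *	status = ice_sched_query_res_alloc(hw);
 *	if (status)
 *		return status;
 *	status = ice_sched_init_port(hw->port_info);
 *	if (status)
 *		return status;
 *
 * The tree-building helpers above use hw->num_tx_sched_layers and
 * hw->max_children[], which ice_sched_query_res_alloc() below fills in, so
 * the query is assumed to come first.
 */
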
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;
	__le16 max_sibl;
	u16 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

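/* Worked example of the max_children[] mapping above: if firmware reports a
 * max sibling group size of 8 for the layer below the root, then
 * hw->max_children[0] (the root's fan-out) is taken from
 * buf->layer_props[1].max_sibl_grp_sz and becomes 8. In general, index i
 * describes how many children a node at layer i may have, using the sibling
 * group size reported for layer i + 1.
 */
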
/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until we find a node that has fewer than the
	 * current minimum number of children. This way all queue group nodes
	 * get an equal share and stay active, and the bandwidth will be
	 * equally distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

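/* Illustrative example of the scan above: with three sibling queue groups of
 * the same owner holding 5, 2 and 7 queues respectively, the loop settles on
 * the group with 2 children, so a new queue lands on the least loaded queue
 * group and bandwidth stays evenly spread.
 */
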
/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate the VSI node for the given TC */
	if (!vsi_node)
		return NULL;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}

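/* Example (illustrative sketch): a queue configuration path would pick the
 * parent for a new LAN queue on TC 0 roughly like this (the TC number and
 * the ICE_SCHED_NODE_OWNER_LAN owner identifier are assumed for the example,
 * as they are defined outside this file):
 *
 *	struct ice_sched_node *qparent;
 *
 *	qparent = ice_sched_get_free_qparent(pi, vsi_handle, 0,
 *					     ICE_SCHED_NODE_OWNER_LAN);
 *	if (!qparent)
 *		return ICE_ERR_CFG;
 *
 * The returned node's TEID is then used as the parent TEID when the LAN
 * queue is added to firmware.
 */
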
/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @hw: pointer to the HW struct
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(hw);
	node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
|
|
|
|
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
|
2019-02-20 07:04:13 +08:00
|
|
|
* @hw: pointer to the HW struct
|
2018-03-20 22:58:17 +08:00
|
|
|
* @num_qs: number of queues
|
|
|
|
* @num_nodes: num nodes array
|
|
|
|
*
|
|
|
|
* This function calculates the number of VSI child nodes based on the
|
|
|
|
* number of queues.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
|
|
|
|
{
|
|
|
|
u16 num = num_qs;
|
|
|
|
u8 i, qgl, vsil;
|
|
|
|
|
|
|
|
qgl = ice_sched_get_qgrp_layer(hw);
|
|
|
|
vsil = ice_sched_get_vsi_layer(hw);
|
|
|
|
|
2019-02-20 07:04:13 +08:00
|
|
|
/* calculate num nodes from queue group to VSI layer */
|
2018-03-20 22:58:17 +08:00
|
|
|
for (i = qgl; i > vsil; i--) {
|
|
|
|
/* round to the next integer if there is a remainder */
|
2018-08-09 21:29:45 +08:00
|
|
|
num = DIV_ROUND_UP(num, hw->max_children[i]);
|
2018-03-20 22:58:17 +08:00
|
|
|
|
|
|
|
/* need at least one node */
|
|
|
|
num_nodes[i] = num ? num : 1;
|
|
|
|
}
|
|
|
|
}
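/* Worked example (illustration only, not taken from the hardware): assuming a
 * hypothetical max_children of 8 at every layer between the queue group layer
 * and the VSI layer, a request for 100 queues needs
 * DIV_ROUND_UP(100, 8) = 13 nodes at the queue group layer,
 * DIV_ROUND_UP(13, 8) = 2 nodes at the layer above it, and at least one node
 * at each remaining layer up to (but not including) the VSI layer.
 */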
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
|
|
|
|
* @pi: port information structure
|
2018-09-20 08:23:13 +08:00
|
|
|
* @vsi_handle: software VSI handle
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc_node: pointer to the TC node
|
|
|
|
* @num_nodes: pointer to the num nodes that needs to be added per layer
|
2019-02-20 07:04:13 +08:00
|
|
|
* @owner: node owner (LAN or RDMA)
|
2018-03-20 22:58:17 +08:00
|
|
|
*
|
|
|
|
* This function adds the VSI child nodes to tree. It gets called for
|
2019-02-20 07:04:13 +08:00
|
|
|
* LAN and RDMA separately.
|
2018-03-20 22:58:17 +08:00
|
|
|
*/
|
|
|
|
static enum ice_status
|
2018-09-20 08:23:13 +08:00
|
|
|
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
|
2018-03-20 22:58:17 +08:00
|
|
|
struct ice_sched_node *tc_node, u16 *num_nodes,
|
|
|
|
u8 owner)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *parent, *node;
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
enum ice_status status;
|
|
|
|
u32 first_node_teid;
|
|
|
|
u16 num_added = 0;
|
|
|
|
u8 i, qgl, vsil;
|
|
|
|
|
|
|
|
qgl = ice_sched_get_qgrp_layer(hw);
|
|
|
|
vsil = ice_sched_get_vsi_layer(hw);
|
2018-09-20 08:23:13 +08:00
|
|
|
parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
|
2018-03-20 22:58:17 +08:00
|
|
|
for (i = vsil + 1; i <= qgl; i++) {
|
|
|
|
if (!parent)
|
|
|
|
return ICE_ERR_CFG;
|
2018-08-09 21:29:45 +08:00
|
|
|
|
2018-03-20 22:58:17 +08:00
|
|
|
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
|
|
|
|
num_nodes[i],
|
|
|
|
&first_node_teid,
|
|
|
|
&num_added);
|
|
|
|
if (status || num_nodes[i] != num_added)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
|
|
|
|
/* The newly added node can be a new parent for the next
|
|
|
|
* layer nodes
|
|
|
|
*/
|
|
|
|
if (num_added) {
|
|
|
|
parent = ice_sched_find_node_by_teid(tc_node,
|
|
|
|
first_node_teid);
|
|
|
|
node = parent;
|
|
|
|
while (node) {
|
|
|
|
node->owner = owner;
|
|
|
|
node = node->sibling;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
parent = parent->children[0];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
|
2019-02-20 07:04:13 +08:00
|
|
|
* @hw: pointer to the HW struct
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc_node: pointer to TC node
|
|
|
|
* @num_nodes: pointer to num nodes array
|
|
|
|
*
|
|
|
|
* This function calculates the number of supported nodes needed to add this
|
2018-10-27 02:44:47 +08:00
|
|
|
* VSI into the Tx tree, including the VSI, parent and intermediate nodes in lower
|
2018-03-20 22:58:17 +08:00
|
|
|
* layers
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
|
|
|
|
struct ice_sched_node *tc_node, u16 *num_nodes)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *node;
|
2018-08-09 21:29:45 +08:00
|
|
|
u8 vsil;
|
|
|
|
int i;
|
2018-03-20 22:58:17 +08:00
|
|
|
|
|
|
|
vsil = ice_sched_get_vsi_layer(hw);
|
|
|
|
for (i = vsil; i >= hw->sw_entry_point_layer; i--)
|
|
|
|
/* Add intermediate nodes if the TC has no children; at
|
|
|
|
* least one node is needed at the VSI layer
|
|
|
|
*/
|
|
|
|
if (!tc_node->num_children || i == vsil) {
|
|
|
|
num_nodes[i]++;
|
|
|
|
} else {
|
|
|
|
/* If the intermediate nodes have reached max children,
|
|
|
|
* then add a new one.
|
|
|
|
*/
|
2019-07-25 17:53:54 +08:00
|
|
|
node = ice_sched_get_first_node(hw->port_info, tc_node,
|
|
|
|
(u8)i);
|
2018-03-20 22:58:17 +08:00
|
|
|
/* scan all the siblings */
|
|
|
|
while (node) {
|
2018-08-09 21:29:45 +08:00
|
|
|
if (node->num_children < hw->max_children[i])
|
2018-03-20 22:58:17 +08:00
|
|
|
break;
|
|
|
|
node = node->sibling;
|
|
|
|
}
|
|
|
|
|
2019-02-09 04:50:30 +08:00
|
|
|
/* The tree has an intermediate node with room to add this new VSI,
|
|
|
|
* so there is no need to calculate supported nodes for the layers
|
|
|
|
* below.
|
|
|
|
*/
|
|
|
|
if (node)
|
|
|
|
break;
|
2018-03-20 22:58:17 +08:00
|
|
|
/* all the nodes are full, allocate a new one */
|
2019-02-09 04:50:30 +08:00
|
|
|
num_nodes[i]++;
|
2018-03-20 22:58:17 +08:00
|
|
|
}
|
|
|
|
}
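/* Worked example (illustration only): num_nodes always gets one entry at the
 * VSI layer for the VSI node itself. Walking up toward the SW entry point
 * layer, if every existing node at the two layers directly above the VSI
 * layer were already at max_children but a node one layer higher still had
 * room, one extra node would also be requested at each of those two full
 * layers and the walk would stop at the layer with room.
 */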
|
|
|
|
|
|
|
|
/**
|
2018-10-27 02:44:47 +08:00
|
|
|
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
|
2018-03-20 22:58:17 +08:00
|
|
|
* @pi: port information structure
|
2018-09-20 08:23:13 +08:00
|
|
|
* @vsi_handle: software VSI handle
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc_node: pointer to TC node
|
|
|
|
* @num_nodes: pointer to num nodes array
|
|
|
|
*
|
2018-10-27 02:44:47 +08:00
|
|
|
* This function adds the VSI supported nodes into the Tx tree including the
|
2018-03-20 22:58:17 +08:00
|
|
|
* VSI, its parent and intermediate nodes in the layers below
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
2018-09-20 08:23:13 +08:00
|
|
|
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
|
2018-03-20 22:58:17 +08:00
|
|
|
struct ice_sched_node *tc_node, u16 *num_nodes)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *parent = tc_node;
|
|
|
|
enum ice_status status;
|
|
|
|
u32 first_node_teid;
|
|
|
|
u16 num_added = 0;
|
|
|
|
u8 i, vsil;
|
|
|
|
|
|
|
|
if (!pi)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
|
|
|
|
vsil = ice_sched_get_vsi_layer(pi->hw);
|
|
|
|
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
|
|
|
|
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
|
|
|
|
i, num_nodes[i],
|
|
|
|
&first_node_teid,
|
|
|
|
&num_added);
|
|
|
|
if (status || num_nodes[i] != num_added)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
|
|
|
|
/* The newly added node can be a new parent for the next
|
|
|
|
* layer nodes
|
|
|
|
*/
|
|
|
|
if (num_added)
|
|
|
|
parent = ice_sched_find_node_by_teid(tc_node,
|
|
|
|
first_node_teid);
|
|
|
|
else
|
|
|
|
parent = parent->children[0];
|
|
|
|
|
|
|
|
if (!parent)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
|
|
|
|
if (i == vsil)
|
2018-09-20 08:23:13 +08:00
|
|
|
parent->vsi_handle = vsi_handle;
|
2018-03-20 22:58:17 +08:00
|
|
|
}
|
2018-08-09 21:29:45 +08:00
|
|
|
|
2018-03-20 22:58:17 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_add_vsi_to_topo - add a new VSI into tree
|
|
|
|
* @pi: port information structure
|
2018-09-20 08:23:13 +08:00
|
|
|
* @vsi_handle: software VSI handle
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc: TC number
|
|
|
|
*
|
|
|
|
* This function adds a new VSI into the scheduler tree
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
2018-09-20 08:23:13 +08:00
|
|
|
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
|
2018-03-20 22:58:17 +08:00
|
|
|
{
|
|
|
|
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
|
|
|
|
struct ice_sched_node *tc_node;
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
|
|
|
|
tc_node = ice_sched_get_tc_node(pi, tc);
|
|
|
|
if (!tc_node)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
|
|
|
|
/* calculate number of supported nodes needed for this VSI */
|
|
|
|
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
|
|
|
|
|
2019-02-20 07:04:13 +08:00
|
|
|
/* add VSI supported nodes to TC subtree */
|
2018-09-20 08:23:13 +08:00
|
|
|
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
|
|
|
|
num_nodes);
|
2018-03-20 22:58:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_update_vsi_child_nodes - update VSI child nodes
|
|
|
|
* @pi: port information structure
|
2018-09-20 08:23:13 +08:00
|
|
|
* @vsi_handle: software VSI handle
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc: TC number
|
|
|
|
* @new_numqs: new number of max queues
|
|
|
|
* @owner: owner of this subtree
|
|
|
|
*
|
|
|
|
* This function updates the VSI child nodes based on the number of queues
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
2018-09-20 08:23:13 +08:00
|
|
|
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
|
|
|
|
u8 tc, u16 new_numqs, u8 owner)
|
2018-03-20 22:58:17 +08:00
|
|
|
{
|
|
|
|
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
|
|
|
|
struct ice_sched_node *vsi_node;
|
|
|
|
struct ice_sched_node *tc_node;
|
2018-09-20 08:23:13 +08:00
|
|
|
struct ice_vsi_ctx *vsi_ctx;
|
2018-03-20 22:58:17 +08:00
|
|
|
enum ice_status status = 0;
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
u16 prev_numqs;
|
|
|
|
|
|
|
|
tc_node = ice_sched_get_tc_node(pi, tc);
|
|
|
|
if (!tc_node)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
|
2018-03-20 22:58:17 +08:00
|
|
|
if (!vsi_node)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
|
|
|
|
if (!vsi_ctx)
|
|
|
|
return ICE_ERR_PARAM;
|
2018-03-20 22:58:17 +08:00
|
|
|
|
2019-03-01 07:25:48 +08:00
|
|
|
prev_numqs = vsi_ctx->sched.max_lanq[tc];
|
2019-02-27 08:35:20 +08:00
|
|
|
/* the number of queues is unchanged or less than the previous number */
|
|
|
|
if (new_numqs <= prev_numqs)
|
2018-03-20 22:58:17 +08:00
|
|
|
return status;
|
2019-03-01 07:25:48 +08:00
|
|
|
status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
2018-03-20 22:58:17 +08:00
|
|
|
if (new_numqs)
|
|
|
|
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
|
2019-02-27 08:35:20 +08:00
|
|
|
/* Always keep the maximum queue configuration. Update the
|
|
|
|
* tree only if number of queues > previous number of queues. This may
|
|
|
|
* leave some extra nodes in the tree if number of queues < previous
|
|
|
|
* number but that wouldn't harm anything. Removing those extra nodes
|
|
|
|
* may complicate the code if those nodes are part of SRL or
|
|
|
|
* individually rate limited.
|
|
|
|
*/
|
|
|
|
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
|
|
|
|
new_num_nodes, owner);
|
|
|
|
if (status)
|
|
|
|
return status;
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_ctx->sched.max_lanq[tc] = new_numqs;
|
2018-03-20 22:58:17 +08:00
|
|
|
|
2019-02-27 08:35:07 +08:00
|
|
|
return 0;
|
2018-03-20 22:58:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2018-10-27 01:41:02 +08:00
|
|
|
* ice_sched_cfg_vsi - configure the new/existing VSI
|
2018-03-20 22:58:17 +08:00
|
|
|
* @pi: port information structure
|
2018-09-20 08:23:13 +08:00
|
|
|
* @vsi_handle: software VSI handle
|
2018-03-20 22:58:17 +08:00
|
|
|
* @tc: TC number
|
|
|
|
* @maxqs: max number of queues
|
2019-02-20 07:04:13 +08:00
|
|
|
* @owner: LAN or RDMA
|
2018-03-20 22:58:17 +08:00
|
|
|
* @enable: TC enabled or disabled
|
|
|
|
*
|
|
|
|
* This function adds/updates VSI nodes based on the number of queues. If TC is
|
|
|
|
* enabled and the VSI is in the suspended state then resume the VSI. If TC is
|
|
|
|
* disabled then suspend the VSI if it is not already.
|
|
|
|
*/
|
|
|
|
enum ice_status
|
2018-09-20 08:23:13 +08:00
|
|
|
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
|
2018-03-20 22:58:17 +08:00
|
|
|
u8 owner, bool enable)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *vsi_node, *tc_node;
|
2018-09-20 08:23:13 +08:00
|
|
|
struct ice_vsi_ctx *vsi_ctx;
|
2018-03-20 22:58:17 +08:00
|
|
|
enum ice_status status = 0;
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
|
2019-02-27 08:35:13 +08:00
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
|
2018-03-20 22:58:17 +08:00
|
|
|
tc_node = ice_sched_get_tc_node(pi, tc);
|
|
|
|
if (!tc_node)
|
|
|
|
return ICE_ERR_PARAM;
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
|
|
|
|
if (!vsi_ctx)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
|
2018-03-20 22:58:17 +08:00
|
|
|
|
2019-02-20 07:04:13 +08:00
|
|
|
/* suspend the VSI if TC is not enabled */
|
2018-03-20 22:58:17 +08:00
|
|
|
if (!enable) {
|
|
|
|
if (vsi_node && vsi_node->in_use) {
|
|
|
|
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
|
|
|
|
|
|
|
|
status = ice_sched_suspend_resume_elems(hw, 1, &teid,
|
|
|
|
true);
|
|
|
|
if (!status)
|
|
|
|
vsi_node->in_use = false;
|
|
|
|
}
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* TC is enabled, if it is a new VSI then add it to the tree */
|
|
|
|
if (!vsi_node) {
|
2018-09-20 08:23:13 +08:00
|
|
|
status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
|
2018-03-20 22:58:17 +08:00
|
|
|
if (status)
|
|
|
|
return status;
|
2018-08-09 21:29:45 +08:00
|
|
|
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
|
2018-03-20 22:58:17 +08:00
|
|
|
if (!vsi_node)
|
|
|
|
return ICE_ERR_CFG;
|
2018-08-09 21:29:45 +08:00
|
|
|
|
2018-09-20 08:23:13 +08:00
|
|
|
vsi_ctx->sched.vsi_node[tc] = vsi_node;
|
2018-03-20 22:58:17 +08:00
|
|
|
vsi_node->in_use = true;
|
2018-09-20 08:23:13 +08:00
|
|
|
/* invalidate the max queues whenever the VSI is added for the first time
|
|
|
|
* into the scheduler tree (boot or after reset). We need to
|
|
|
|
* recreate the child nodes all the time in these cases.
|
|
|
|
*/
|
|
|
|
vsi_ctx->sched.max_lanq[tc] = 0;
|
2018-03-20 22:58:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* update the VSI child nodes */
|
2018-09-20 08:23:13 +08:00
|
|
|
status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
|
|
|
|
owner);
|
2018-03-20 22:58:17 +08:00
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* TC is enabled, resume the VSI if it is in the suspend state */
|
|
|
|
if (!vsi_node->in_use) {
|
|
|
|
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
|
|
|
|
|
|
|
|
status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
|
|
|
|
if (!status)
|
|
|
|
vsi_node->in_use = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
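/* Hedged usage sketch (illustration only, not part of the driver): enabling
 * TC 0 for a VSI with room for up to 64 LAN queues. The 'pi' and 'vsi_handle'
 * values are hypothetical caller state; ice_sched_cfg_vsi() and
 * ICE_SCHED_NODE_OWNER_LAN come from this driver. Other scheduler-tree
 * updates in this file are done under pi->sched_lock, so the sketch takes it
 * as well.
 */
static enum ice_status
ice_example_cfg_vsi_tc0(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	mutex_lock(&pi->sched_lock);
	/* add/update the VSI node on TC 0 with room for 64 LAN queues */
	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 64,
				   ICE_SCHED_NODE_OWNER_LAN, true);
	mutex_unlock(&pi->sched_lock);
	return status;
}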
|
2018-10-27 01:41:02 +08:00
|
|
|
|
|
|
|
/**
|
2019-02-20 07:04:13 +08:00
|
|
|
* ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
|
2018-10-27 01:41:02 +08:00
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: software VSI handle
|
|
|
|
*
|
|
|
|
* This function removes single aggregator VSI info entry from
|
|
|
|
* aggregator list.
|
|
|
|
*/
|
2020-05-16 08:55:02 +08:00
|
|
|
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
|
2018-10-27 01:41:02 +08:00
|
|
|
{
|
|
|
|
struct ice_sched_agg_info *agg_info;
|
|
|
|
struct ice_sched_agg_info *atmp;
|
|
|
|
|
2018-12-20 02:03:28 +08:00
|
|
|
list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
|
|
|
|
list_entry) {
|
2018-10-27 01:41:02 +08:00
|
|
|
struct ice_sched_agg_vsi_info *agg_vsi_info;
|
|
|
|
struct ice_sched_agg_vsi_info *vtmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(agg_vsi_info, vtmp,
|
|
|
|
&agg_info->agg_vsi_list, list_entry)
|
|
|
|
if (agg_vsi_info->vsi_handle == vsi_handle) {
|
|
|
|
list_del(&agg_vsi_info->list_entry);
|
|
|
|
devm_kfree(ice_hw_to_dev(pi->hw),
|
|
|
|
agg_vsi_info);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-09 04:50:40 +08:00
|
|
|
/**
|
|
|
|
* ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
|
|
|
|
* @node: pointer to the sub-tree node
|
|
|
|
*
|
|
|
|
* This function checks for a leaf node presence in a given sub-tree node.
|
|
|
|
*/
|
|
|
|
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
|
|
|
|
{
|
|
|
|
u8 i;
|
|
|
|
|
|
|
|
for (i = 0; i < node->num_children; i++)
|
|
|
|
if (ice_sched_is_leaf_node_present(node->children[i]))
|
|
|
|
return true;
|
|
|
|
/* check for a leaf node */
|
|
|
|
return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
|
|
|
|
}
|
|
|
|
|
2018-10-27 01:41:02 +08:00
|
|
|
/**
|
|
|
|
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
|
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: software VSI handle
|
|
|
|
* @owner: LAN or RDMA
|
|
|
|
*
|
|
|
|
* This function removes the VSI and its LAN or RDMA children nodes from the
|
|
|
|
* scheduler tree.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
|
|
|
|
{
|
|
|
|
enum ice_status status = ICE_ERR_PARAM;
|
|
|
|
struct ice_vsi_ctx *vsi_ctx;
|
2019-02-27 08:35:13 +08:00
|
|
|
u8 i;
|
2018-10-27 01:41:02 +08:00
|
|
|
|
2019-02-27 08:35:13 +08:00
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
|
2018-10-27 01:41:02 +08:00
|
|
|
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
|
|
|
|
return status;
|
|
|
|
mutex_lock(&pi->sched_lock);
|
|
|
|
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
|
|
|
|
if (!vsi_ctx)
|
|
|
|
goto exit_sched_rm_vsi_cfg;
|
|
|
|
|
2019-02-14 02:51:10 +08:00
|
|
|
ice_for_each_traffic_class(i) {
|
2018-10-27 01:41:02 +08:00
|
|
|
struct ice_sched_node *vsi_node, *tc_node;
|
2019-02-27 08:35:13 +08:00
|
|
|
u8 j = 0;
|
2018-10-27 01:41:02 +08:00
|
|
|
|
|
|
|
tc_node = ice_sched_get_tc_node(pi, i);
|
|
|
|
if (!tc_node)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
|
|
|
|
if (!vsi_node)
|
|
|
|
continue;
|
|
|
|
|
2019-02-09 04:50:40 +08:00
|
|
|
if (ice_sched_is_leaf_node_present(vsi_node)) {
|
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED,
|
|
|
|
"VSI has leaf nodes in TC %d\n", i);
|
|
|
|
status = ICE_ERR_IN_USE;
|
|
|
|
goto exit_sched_rm_vsi_cfg;
|
|
|
|
}
|
2018-10-27 01:41:02 +08:00
|
|
|
while (j < vsi_node->num_children) {
|
|
|
|
if (vsi_node->children[j]->owner == owner) {
|
|
|
|
ice_free_sched_node(pi, vsi_node->children[j]);
|
|
|
|
|
|
|
|
/* reset the counter again since the num
|
|
|
|
* children will be updated after node removal
|
|
|
|
*/
|
|
|
|
j = 0;
|
|
|
|
} else {
|
|
|
|
j++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* remove the VSI if it has no children */
|
|
|
|
if (!vsi_node->num_children) {
|
|
|
|
ice_free_sched_node(pi, vsi_node);
|
|
|
|
vsi_ctx->sched.vsi_node[i] = NULL;
|
|
|
|
|
2019-02-20 07:04:13 +08:00
|
|
|
/* clean up aggregator related VSI info if any */
|
2018-10-27 01:41:02 +08:00
|
|
|
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
|
|
|
|
}
|
|
|
|
if (owner == ICE_SCHED_NODE_OWNER_LAN)
|
|
|
|
vsi_ctx->sched.max_lanq[i] = 0;
|
|
|
|
}
|
|
|
|
status = 0;
|
|
|
|
|
|
|
|
exit_sched_rm_vsi_cfg:
|
|
|
|
mutex_unlock(&pi->sched_lock);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
|
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: software VSI handle
|
|
|
|
*
|
|
|
|
* This function clears the VSI and its LAN children nodes from scheduler tree
|
|
|
|
* for all TCs.
|
|
|
|
*/
|
|
|
|
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
|
|
|
|
{
|
|
|
|
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
|
|
|
|
}
|
2019-11-06 18:05:28 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_rm_unused_rl_prof - remove unused RL profile
|
|
|
|
* @pi: port information structure
|
|
|
|
*
|
|
|
|
* This function removes unused rate limit profiles from the HW and
|
|
|
|
* SW DB. The caller needs to hold scheduler lock.
|
|
|
|
*/
|
|
|
|
static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
|
|
|
|
{
|
|
|
|
u16 ln;
|
|
|
|
|
|
|
|
for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
|
|
|
|
struct ice_aqc_rl_profile_info *rl_prof_elem;
|
|
|
|
struct ice_aqc_rl_profile_info *rl_prof_tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
|
|
|
|
&pi->rl_prof_list[ln], list_entry) {
|
|
|
|
if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
|
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED,
|
|
|
|
"Removed rl profile\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_update_elem - update element
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @node: pointer to node
|
|
|
|
* @info: node info to update
|
|
|
|
*
|
2020-06-30 08:27:45 +08:00
|
|
|
* Update the HW DB and the local SW DB of the node. Update the scheduling
|
2019-11-06 18:05:28 +08:00
|
|
|
* parameters of the node from the argument 'info' data buffer (info->data) and
|
|
|
|
* return success or an error if configuring the sched element fails. The caller
|
|
|
|
* needs to hold scheduler lock.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
|
|
|
|
struct ice_aqc_txsched_elem_data *info)
|
|
|
|
{
|
2020-06-30 08:27:45 +08:00
|
|
|
struct ice_aqc_txsched_elem_data buf;
|
2019-11-06 18:05:28 +08:00
|
|
|
enum ice_status status;
|
|
|
|
u16 elem_cfgd = 0;
|
|
|
|
u16 num_elems = 1;
|
|
|
|
|
2020-06-30 08:27:45 +08:00
|
|
|
buf = *info;
|
2019-11-06 18:05:28 +08:00
|
|
|
/* Parent TEID is reserved field in this aq call */
|
2020-06-30 08:27:45 +08:00
|
|
|
buf.parent_teid = 0;
|
2019-11-06 18:05:28 +08:00
|
|
|
/* Element type is reserved field in this aq call */
|
2020-06-30 08:27:45 +08:00
|
|
|
buf.data.elem_type = 0;
|
2019-11-06 18:05:28 +08:00
|
|
|
/* Flags is reserved field in this aq call */
|
2020-06-30 08:27:45 +08:00
|
|
|
buf.data.flags = 0;
|
2019-11-06 18:05:28 +08:00
|
|
|
|
|
|
|
/* Update HW DB */
|
|
|
|
/* Configure element node */
|
|
|
|
status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
|
|
|
|
&elem_cfgd, NULL);
|
|
|
|
if (status || elem_cfgd != num_elems) {
|
|
|
|
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Config success case */
|
|
|
|
/* Now update local SW DB */
|
|
|
|
/* Only copy the data portion of info buffer */
|
|
|
|
node->info.data = info->data;
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @node: sched node to configure
|
|
|
|
* @rl_type: rate limit type CIR, EIR, or shared
|
|
|
|
* @bw_alloc: BW weight/allocation
|
|
|
|
*
|
|
|
|
* This function configures node element's BW allocation.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
|
2020-05-08 08:41:05 +08:00
|
|
|
enum ice_rl_type rl_type, u16 bw_alloc)
|
2019-11-06 18:05:28 +08:00
|
|
|
{
|
|
|
|
struct ice_aqc_txsched_elem_data buf;
|
|
|
|
struct ice_aqc_txsched_elem *data;
|
|
|
|
enum ice_status status;
|
|
|
|
|
|
|
|
buf = node->info;
|
|
|
|
data = &buf.data;
|
|
|
|
if (rl_type == ICE_MIN_BW) {
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
|
|
|
|
data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
|
|
|
|
} else if (rl_type == ICE_MAX_BW) {
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
|
|
|
|
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
|
|
|
|
} else {
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Configure element */
|
|
|
|
status = ice_sched_update_elem(hw, node, &buf);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_set_clear_cir_bw - set or clear CIR BW
|
|
|
|
* @bw_t_info: bandwidth type information structure
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
*
|
|
|
|
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
|
|
|
|
*/
|
2020-05-16 08:55:02 +08:00
|
|
|
static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
|
2019-11-06 18:05:28 +08:00
|
|
|
{
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW) {
|
|
|
|
clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->cir_bw.bw = 0;
|
|
|
|
} else {
|
|
|
|
/* Save type of BW information */
|
|
|
|
set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->cir_bw.bw = bw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_set_clear_eir_bw - set or clear EIR BW
|
|
|
|
* @bw_t_info: bandwidth type information structure
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
*
|
|
|
|
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
|
|
|
|
*/
|
2020-05-16 08:55:02 +08:00
|
|
|
static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
|
2019-11-06 18:05:28 +08:00
|
|
|
{
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW) {
|
|
|
|
clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->eir_bw.bw = 0;
|
|
|
|
} else {
|
|
|
|
/* EIR BW and Shared BW profiles are mutually exclusive and
|
|
|
|
* hence only one of them may be set for any given element.
|
|
|
|
* First clear earlier saved shared BW information.
|
|
|
|
*/
|
|
|
|
clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->shared_bw = 0;
|
|
|
|
/* save EIR BW information */
|
|
|
|
set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->eir_bw.bw = bw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_set_clear_shared_bw - set or clear shared BW
|
|
|
|
* @bw_t_info: bandwidth type information structure
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
*
|
|
|
|
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
|
|
|
|
*/
|
2020-05-16 08:55:02 +08:00
|
|
|
static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
|
2019-11-06 18:05:28 +08:00
|
|
|
{
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW) {
|
|
|
|
clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->shared_bw = 0;
|
|
|
|
} else {
|
|
|
|
/* EIR BW and Shared BW profiles are mutually exclusive and
|
|
|
|
* hence only one of them may be set for any given element.
|
|
|
|
* First clear earlier saved EIR BW information.
|
|
|
|
*/
|
|
|
|
clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->eir_bw.bw = 0;
|
|
|
|
/* save shared BW information */
|
|
|
|
set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
|
|
|
|
bw_t_info->shared_bw = bw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
|
|
|
|
* @bw: bandwidth in Kbps
|
|
|
|
*
|
|
|
|
* This function calculates the wakeup parameter of RL profile.
|
|
|
|
*/
|
|
|
|
static u16 ice_sched_calc_wakeup(s32 bw)
|
|
|
|
{
|
|
|
|
s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
|
|
|
|
s32 wakeup_f_int;
|
|
|
|
u16 wakeup = 0;
|
|
|
|
|
|
|
|
/* Get the wakeup integer value */
|
|
|
|
bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
|
|
|
|
wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
|
|
|
|
if (wakeup_int > 63) {
|
|
|
|
wakeup = (u16)((1 << 15) | wakeup_int);
|
|
|
|
} else {
|
|
|
|
/* Calculate fraction value up to 4 decimals
|
|
|
|
* Convert Integer value to a constant multiplier
|
|
|
|
*/
|
|
|
|
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
|
|
|
|
wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
|
|
|
|
ICE_RL_PROF_FREQUENCY,
|
|
|
|
bytes_per_sec);
|
|
|
|
|
|
|
|
/* Get Fraction value */
|
|
|
|
wakeup_f = wakeup_a - wakeup_b;
|
|
|
|
|
|
|
|
/* Round up the Fractional value via Ceil(Fractional value) */
|
|
|
|
if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
|
|
|
|
wakeup_f += 1;
|
|
|
|
|
|
|
|
wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
|
|
|
|
ICE_RL_PROF_MULTIPLIER);
|
|
|
|
wakeup |= (u16)(wakeup_int << 9);
|
|
|
|
wakeup |= (u16)(0x1ff & wakeup_f_int);
|
|
|
|
}
|
|
|
|
|
|
|
|
return wakeup;
|
|
|
|
}
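/* Worked example (illustration only, with a hypothetical profile frequency of
 * 800,000,000): for bw = 100,000 Kbps, bytes_per_sec = 100,000 * 1000 / 8 =
 * 12,500,000 and wakeup_int = 800,000,000 / 12,500,000 = 64. Since 64 > 63,
 * the integer-only path is taken and wakeup = (1 << 15) | 64.
 */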
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_bw_to_rl_profile - convert BW to profile parameters
|
|
|
|
* @bw: bandwidth in Kbps
|
|
|
|
* @profile: profile parameters to return
|
|
|
|
*
|
|
|
|
* This function converts the BW to profile structure format.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
|
|
|
|
{
|
|
|
|
enum ice_status status = ICE_ERR_PARAM;
|
|
|
|
s64 bytes_per_sec, ts_rate, mv_tmp;
|
|
|
|
bool found = false;
|
|
|
|
s32 encode = 0;
|
|
|
|
s64 mv = 0;
|
|
|
|
s32 i;
|
|
|
|
|
|
|
|
/* BW settings range from 0.5 Mbps to 100 Gbps */
|
|
|
|
if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Bytes per second from Kbps */
|
|
|
|
bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
|
|
|
|
|
|
|
|
/* the encode field is 6 bits wide, but only 5 bits are actually useful */
|
|
|
|
for (i = 0; i < 64; i++) {
|
|
|
|
u64 pow_result = BIT_ULL(i);
|
|
|
|
|
|
|
|
ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
|
|
|
|
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
|
|
|
|
if (ts_rate <= 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Multiplier value */
|
|
|
|
mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
|
|
|
|
ts_rate);
|
|
|
|
|
|
|
|
/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
|
|
|
|
mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
|
|
|
|
|
|
|
|
/* First multiplier value greater than the given
|
|
|
|
* accuracy bytes
|
|
|
|
*/
|
|
|
|
if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
|
|
|
|
encode = i;
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (found) {
|
|
|
|
u16 wm;
|
|
|
|
|
|
|
|
wm = ice_sched_calc_wakeup(bw);
|
|
|
|
profile->rl_multiply = cpu_to_le16(mv);
|
|
|
|
profile->wake_up_calc = cpu_to_le16(wm);
|
|
|
|
profile->rl_encode = cpu_to_le16(encode);
|
|
|
|
status = 0;
|
|
|
|
} else {
|
|
|
|
status = ICE_ERR_DOES_NOT_EXIST;
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_add_rl_profile - add RL profile
|
|
|
|
* @pi: port information structure
|
|
|
|
* @rl_type: type of rate limit BW - min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
* @layer_num: specifies in which layer to create profile
|
|
|
|
*
|
|
|
|
* This function first checks the existing list for a corresponding BW
|
|
|
|
* parameter. If it exists, it returns the associated profile; otherwise
|
|
|
|
* it creates a new rate limit profile for requested BW, and adds it to
|
|
|
|
* the HW DB and local list. It returns the new profile or null on error.
|
|
|
|
* The caller needs to hold the scheduler lock.
|
|
|
|
*/
|
|
|
|
static struct ice_aqc_rl_profile_info *
|
|
|
|
ice_sched_add_rl_profile(struct ice_port_info *pi,
|
|
|
|
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
|
|
|
|
{
|
|
|
|
struct ice_aqc_rl_profile_info *rl_prof_elem;
|
|
|
|
u16 profiles_added = 0, num_profiles = 1;
|
2020-06-30 08:27:45 +08:00
|
|
|
struct ice_aqc_rl_profile_elem *buf;
|
2019-11-06 18:05:28 +08:00
|
|
|
enum ice_status status;
|
|
|
|
struct ice_hw *hw;
|
|
|
|
u8 profile_type;
|
|
|
|
|
|
|
|
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
|
|
|
|
return NULL;
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!pi)
|
|
|
|
return NULL;
|
|
|
|
hw = pi->hw;
|
|
|
|
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
|
|
|
|
list_entry)
|
2020-07-14 04:53:10 +08:00
|
|
|
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
|
|
|
|
profile_type && rl_prof_elem->bw == bw)
|
2019-11-06 18:05:28 +08:00
|
|
|
/* Return existing profile ID info */
|
|
|
|
return rl_prof_elem;
|
|
|
|
|
|
|
|
/* Create new profile ID */
|
|
|
|
rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
|
|
|
|
GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!rl_prof_elem)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
|
|
|
|
if (status)
|
|
|
|
goto exit_add_rl_prof;
|
|
|
|
|
|
|
|
rl_prof_elem->bw = bw;
|
|
|
|
/* layer_num is zero relative, and fw expects level from 1 to 9 */
|
|
|
|
rl_prof_elem->profile.level = layer_num + 1;
|
|
|
|
rl_prof_elem->profile.flags = profile_type;
|
|
|
|
rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
|
|
|
|
|
|
|
|
/* Create new entry in HW DB */
|
2020-06-30 08:27:45 +08:00
|
|
|
buf = &rl_prof_elem->profile;
|
2019-11-06 18:05:28 +08:00
|
|
|
status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
|
|
|
|
&profiles_added, NULL);
|
|
|
|
if (status || profiles_added != num_profiles)
|
|
|
|
goto exit_add_rl_prof;
|
|
|
|
|
|
|
|
/* Good entry - add in the list */
|
|
|
|
rl_prof_elem->prof_id_ref = 0;
|
|
|
|
list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
|
|
|
|
return rl_prof_elem;
|
|
|
|
|
|
|
|
exit_add_rl_prof:
|
|
|
|
devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_cfg_node_bw_lmt - configure node sched params
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @node: sched node to configure
|
|
|
|
* @rl_type: rate limit type CIR, EIR, or shared
|
|
|
|
* @rl_prof_id: rate limit profile ID
|
|
|
|
*
|
|
|
|
* This function configures node element's BW limit.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type, u16 rl_prof_id)
|
|
|
|
{
|
|
|
|
struct ice_aqc_txsched_elem_data buf;
|
|
|
|
struct ice_aqc_txsched_elem *data;
|
|
|
|
|
|
|
|
buf = node->info;
|
|
|
|
data = &buf.data;
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
|
|
|
|
data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
/* EIR BW and Shared BW profiles are mutually exclusive and
|
|
|
|
* hence only one of them may be set for any given element
|
|
|
|
*/
|
|
|
|
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
|
|
|
|
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
/* Check for removing shared BW */
|
|
|
|
if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
|
|
|
|
/* remove shared profile */
|
|
|
|
data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
|
|
|
|
data->srl_id = 0; /* clear SRL field */
|
|
|
|
|
|
|
|
/* enable back EIR to default profile */
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
|
|
|
|
data->eir_bw.bw_profile_idx =
|
|
|
|
cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* EIR BW and Shared BW profiles are mutually exclusive and
|
|
|
|
* hence only one of them may be set for any given element
|
|
|
|
*/
|
|
|
|
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
|
|
|
|
(le16_to_cpu(data->eir_bw.bw_profile_idx) !=
|
|
|
|
ICE_SCHED_DFLT_RL_PROF_ID))
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
/* EIR BW is set to default, disable it */
|
|
|
|
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
|
|
|
|
/* Okay to enable shared BW now */
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
|
|
|
|
data->srl_id = cpu_to_le16(rl_prof_id);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Unknown rate limit type */
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Configure element */
|
|
|
|
return ice_sched_update_elem(hw, node, &buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
|
|
|
|
* @node: sched node
|
|
|
|
* @rl_type: rate limit type
|
|
|
|
*
|
|
|
|
* If existing profile matches, it returns the corresponding rate
|
|
|
|
* limit profile ID, otherwise it returns an invalid ID as error.
|
|
|
|
*/
|
|
|
|
static u16
|
|
|
|
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type)
|
|
|
|
{
|
|
|
|
u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
|
|
|
|
struct ice_aqc_txsched_elem *data;
|
|
|
|
|
|
|
|
data = &node->info.data;
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
|
|
|
|
rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
|
|
|
|
rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
|
|
|
|
rl_prof_id = le16_to_cpu(data->srl_id);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rl_prof_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
|
|
|
|
* @pi: port information structure
|
|
|
|
* @rl_type: type of rate limit BW - min, max, or shared
|
|
|
|
* @layer_index: layer index
|
|
|
|
*
|
|
|
|
* This function returns requested profile creation layer.
|
|
|
|
*/
|
|
|
|
static u8
|
|
|
|
ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
|
|
|
|
u8 layer_index)
|
|
|
|
{
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
|
|
|
|
if (layer_index >= hw->num_tx_sched_layers)
|
|
|
|
return ICE_SCHED_INVAL_LAYER_NUM;
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
if (hw->layer_info[layer_index].max_cir_rl_profiles)
|
|
|
|
return layer_index;
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
if (hw->layer_info[layer_index].max_eir_rl_profiles)
|
|
|
|
return layer_index;
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
/* if current layer doesn't support SRL profile creation
|
|
|
|
* then try a layer up or down.
|
|
|
|
*/
|
|
|
|
if (hw->layer_info[layer_index].max_srl_profiles)
|
|
|
|
return layer_index;
|
|
|
|
else if (layer_index < hw->num_tx_sched_layers - 1 &&
|
|
|
|
hw->layer_info[layer_index + 1].max_srl_profiles)
|
|
|
|
return layer_index + 1;
|
|
|
|
else if (layer_index > 0 &&
|
|
|
|
hw->layer_info[layer_index - 1].max_srl_profiles)
|
|
|
|
return layer_index - 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return ICE_SCHED_INVAL_LAYER_NUM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_get_srl_node - get shared rate limit node
|
|
|
|
* @node: tree node
|
|
|
|
* @srl_layer: shared rate limit layer
|
|
|
|
*
|
|
|
|
* This function returns SRL node to be used for shared rate limit purpose.
|
|
|
|
* The caller needs to hold scheduler lock.
|
|
|
|
*/
|
|
|
|
static struct ice_sched_node *
|
|
|
|
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
|
|
|
|
{
|
|
|
|
if (srl_layer > node->tx_sched_layer)
|
|
|
|
return node->children[0];
|
|
|
|
else if (srl_layer < node->tx_sched_layer)
|
|
|
|
/* A node can't be created without a parent. It will always
|
|
|
|
* have a valid parent except the root node.
|
|
|
|
*/
|
|
|
|
return node->parent;
|
|
|
|
else
|
|
|
|
return node;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_rm_rl_profile - remove RL profile ID
|
|
|
|
* @pi: port information structure
|
|
|
|
* @layer_num: layer number where profiles are saved
|
|
|
|
* @profile_type: profile type like EIR, CIR, or SRL
|
|
|
|
* @profile_id: profile ID to remove
|
|
|
|
*
|
|
|
|
* This function removes the rate limit profile with ID 'profile_id' and type
|
|
|
|
* 'profile_type' from layer 'layer_num'. The caller needs to hold the
|
|
|
|
* scheduler lock.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
|
|
|
|
u16 profile_id)
|
|
|
|
{
|
|
|
|
struct ice_aqc_rl_profile_info *rl_prof_elem;
|
|
|
|
enum ice_status status = 0;
|
|
|
|
|
|
|
|
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
/* Check the existing list for RL profile */
|
|
|
|
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
|
|
|
|
list_entry)
|
2020-07-14 04:53:10 +08:00
|
|
|
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
|
|
|
|
profile_type &&
|
2019-11-06 18:05:28 +08:00
|
|
|
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
|
|
|
|
profile_id) {
|
|
|
|
if (rl_prof_elem->prof_id_ref)
|
|
|
|
rl_prof_elem->prof_id_ref--;
|
|
|
|
|
|
|
|
/* Remove old profile ID from database */
|
|
|
|
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
|
|
|
|
if (status && status != ICE_ERR_IN_USE)
|
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED,
|
|
|
|
"Remove rl profile failed\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (status == ICE_ERR_IN_USE)
|
|
|
|
status = 0;
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
|
|
|
|
* @pi: port information structure
|
|
|
|
* @node: pointer to node structure
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
* @layer_num: layer number where RL profiles are saved
|
|
|
|
*
|
|
|
|
* This function configures node element's BW rate limit profile ID of
|
|
|
|
* type CIR, EIR, or SRL to default. This function needs to be called
|
|
|
|
* with the scheduler lock held.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
|
|
|
|
struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type, u8 layer_num)
|
|
|
|
{
|
|
|
|
enum ice_status status;
|
|
|
|
struct ice_hw *hw;
|
|
|
|
u8 profile_type;
|
|
|
|
u16 rl_prof_id;
|
|
|
|
u16 old_id;
|
|
|
|
|
|
|
|
hw = pi->hw;
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
|
|
|
|
rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
|
|
|
|
rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
|
|
|
|
/* No SRL is configured for default case */
|
|
|
|
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
}
|
|
|
|
/* Save existing RL prof ID for later clean up */
|
|
|
|
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
|
|
|
|
/* Configure BW scheduling parameters */
|
|
|
|
status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Remove stale RL profile ID */
|
|
|
|
if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
|
|
|
|
old_id == ICE_SCHED_INVAL_PROF_ID)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
|
|
|
|
* @pi: port information structure
|
|
|
|
* @node: pointer to node structure
|
|
|
|
* @layer_num: layer number where rate limit profiles are saved
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
* @bw: bandwidth value
|
|
|
|
*
|
|
|
|
* This function prepares node element's bandwidth to SRL or EIR exclusively.
|
|
|
|
* EIR BW and Shared BW profiles are mutually exclusive and hence only one of
|
|
|
|
* them may be set for any given element. This function needs to be called
|
|
|
|
* with the scheduler lock held.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
|
|
|
|
struct ice_sched_node *node,
|
|
|
|
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
|
|
|
|
{
|
|
|
|
if (rl_type == ICE_SHARED_BW) {
|
|
|
|
/* SRL node passed in this case, it may be different node */
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW)
|
|
|
|
/* SRL being removed, ice_sched_cfg_node_bw_lmt()
|
|
|
|
* enables EIR to default. EIR is not set in this
|
|
|
|
* case, so no additional action is required.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* SRL being configured, set EIR to default here.
|
|
|
|
* ice_sched_cfg_node_bw_lmt() disables EIR when it
|
|
|
|
* configures SRL
|
|
|
|
*/
|
|
|
|
return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
|
|
|
|
layer_num);
|
|
|
|
} else if (rl_type == ICE_MAX_BW &&
|
|
|
|
node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
|
|
|
|
/* Remove Shared profile. Set default shared BW call
|
|
|
|
* removes shared profile for a node.
|
|
|
|
*/
|
|
|
|
return ice_sched_set_node_bw_dflt(pi, node,
|
|
|
|
ICE_SHARED_BW,
|
|
|
|
layer_num);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_node_bw - set node's bandwidth
|
|
|
|
* @pi: port information structure
|
|
|
|
* @node: tree node
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
* @layer_num: layer number
|
|
|
|
*
|
|
|
|
* This function adds new profile corresponding to requested BW, configures
|
|
|
|
* node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile
|
|
|
|
* ID from local database. The caller needs to hold scheduler lock.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
|
|
|
|
{
|
|
|
|
struct ice_aqc_rl_profile_info *rl_prof_info;
|
|
|
|
enum ice_status status = ICE_ERR_PARAM;
|
|
|
|
struct ice_hw *hw = pi->hw;
|
|
|
|
u16 old_id, rl_prof_id;
|
|
|
|
|
|
|
|
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
|
|
|
|
if (!rl_prof_info)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
|
|
|
|
|
|
|
|
/* Save existing RL prof ID for later clean up */
|
|
|
|
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
|
|
|
|
/* Configure BW scheduling parameters */
|
|
|
|
status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* New changes have been applied */
|
|
|
|
/* Increment the profile ID reference count */
|
|
|
|
rl_prof_info->prof_id_ref++;
|
|
|
|
|
|
|
|
/* Check for old ID removal */
|
|
|
|
if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
|
|
|
|
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return ice_sched_rm_rl_profile(pi, layer_num,
|
2020-07-14 04:53:10 +08:00
|
|
|
rl_prof_info->profile.flags &
|
|
|
|
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
|
2019-11-06 18:05:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_node_bw_lmt - set node's BW limit
|
|
|
|
* @pi: port information structure
|
|
|
|
* @node: tree node
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
*
|
|
|
|
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
|
|
|
|
* EIR, or SRL. The caller needs to hold scheduler lock.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type, u32 bw)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *cfg_node = node;
|
|
|
|
enum ice_status status;
|
|
|
|
|
|
|
|
struct ice_hw *hw;
|
|
|
|
u8 layer_num;
|
|
|
|
|
|
|
|
if (!pi)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
hw = pi->hw;
|
|
|
|
/* Remove unused RL profile IDs from HW and SW DB */
|
|
|
|
ice_sched_rm_unused_rl_prof(pi);
|
|
|
|
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
|
|
|
|
node->tx_sched_layer);
|
|
|
|
if (layer_num >= hw->num_tx_sched_layers)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
|
|
|
|
if (rl_type == ICE_SHARED_BW) {
|
|
|
|
/* SRL node may be different */
|
|
|
|
cfg_node = ice_sched_get_srl_node(node, layer_num);
|
|
|
|
if (!cfg_node)
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
}
|
|
|
|
/* EIR BW and Shared BW profiles are mutually exclusive and
|
|
|
|
* hence only one of them may be set for any given element
|
|
|
|
*/
|
|
|
|
status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
|
|
|
|
bw);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW)
|
|
|
|
return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
|
|
|
|
layer_num);
|
|
|
|
return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
|
|
|
|
* @pi: port information structure
|
|
|
|
* @node: pointer to node structure
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
*
|
|
|
|
* This function configures node element's BW rate limit profile ID of
|
|
|
|
* type CIR, EIR, or SRL to default. This function needs to be called
|
|
|
|
* with the scheduler lock held.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
|
|
|
|
struct ice_sched_node *node,
|
|
|
|
enum ice_rl_type rl_type)
|
|
|
|
{
|
|
|
|
return ice_sched_set_node_bw_lmt(pi, node, rl_type,
|
|
|
|
ICE_SCHED_DFLT_BW);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_validate_srl_node - Check node for SRL applicability
|
|
|
|
* @node: sched node to configure
|
|
|
|
* @sel_layer: selected SRL layer
|
|
|
|
*
|
|
|
|
* This function checks if the SRL can be applied to a selected layer node on
|
|
|
|
* behalf of the requested node (first argument). This function needs to be
|
|
|
|
* called with scheduler lock held.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
|
|
|
|
{
|
|
|
|
/* SRL profiles are not available on all layers. Check if the
|
|
|
|
* SRL profile can be applied to a node above or below the
|
|
|
|
* requested node. SRL configuration is possible only if the
|
|
|
|
* selected layer's node has a single child.
|
|
|
|
*/
|
|
|
|
if (sel_layer == node->tx_sched_layer ||
|
|
|
|
((sel_layer == node->tx_sched_layer + 1) &&
|
|
|
|
node->num_children == 1) ||
|
|
|
|
((sel_layer == node->tx_sched_layer - 1) &&
|
|
|
|
(node->parent && node->parent->num_children == 1)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return ICE_ERR_CFG;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_save_q_bw - save queue node's BW information
|
|
|
|
* @q_ctx: queue context structure
|
|
|
|
* @rl_type: rate limit type min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps - Kilo bits per sec
|
|
|
|
*
|
|
|
|
* Save BW information of queue type node for post replay use.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
|
|
|
|
{
|
|
|
|
switch (rl_type) {
|
|
|
|
case ICE_MIN_BW:
|
|
|
|
ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
|
|
|
|
break;
|
|
|
|
case ICE_MAX_BW:
|
|
|
|
ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
|
|
|
|
break;
|
|
|
|
case ICE_SHARED_BW:
|
|
|
|
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_set_q_bw_lmt - sets queue BW limit
|
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: sw VSI handle
|
|
|
|
* @tc: traffic class
|
|
|
|
* @q_handle: software queue handle
|
|
|
|
* @rl_type: min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps
|
|
|
|
*
|
|
|
|
* This function sets BW limit of queue scheduling node.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
|
|
|
|
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
|
|
|
|
{
|
|
|
|
enum ice_status status = ICE_ERR_PARAM;
|
|
|
|
struct ice_sched_node *node;
|
|
|
|
struct ice_q_ctx *q_ctx;
|
|
|
|
|
|
|
|
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
mutex_lock(&pi->sched_lock);
|
|
|
|
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
|
|
|
|
if (!q_ctx)
|
|
|
|
goto exit_q_bw_lmt;
|
|
|
|
node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
|
|
|
|
if (!node) {
|
|
|
|
ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
|
|
|
|
goto exit_q_bw_lmt;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return error if it is not a leaf node */
|
|
|
|
if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
|
|
|
|
goto exit_q_bw_lmt;
|
|
|
|
|
|
|
|
/* SRL bandwidth layer selection */
|
|
|
|
if (rl_type == ICE_SHARED_BW) {
|
|
|
|
u8 sel_layer; /* selected layer */
|
|
|
|
|
|
|
|
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
|
|
|
|
node->tx_sched_layer);
|
|
|
|
if (sel_layer >= pi->hw->num_tx_sched_layers) {
|
|
|
|
status = ICE_ERR_PARAM;
|
|
|
|
goto exit_q_bw_lmt;
|
|
|
|
}
|
|
|
|
status = ice_sched_validate_srl_node(node, sel_layer);
|
|
|
|
if (status)
|
|
|
|
goto exit_q_bw_lmt;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bw == ICE_SCHED_DFLT_BW)
|
|
|
|
status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
|
|
|
|
else
|
|
|
|
status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
|
|
|
|
|
|
|
|
if (!status)
|
|
|
|
status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
|
|
|
|
|
|
|
|
exit_q_bw_lmt:
|
|
|
|
mutex_unlock(&pi->sched_lock);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_q_bw_lmt - configure queue BW limit
|
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: sw VSI handle
|
|
|
|
* @tc: traffic class
|
|
|
|
* @q_handle: software queue handle
|
|
|
|
* @rl_type: min, max, or shared
|
|
|
|
* @bw: bandwidth in Kbps
|
|
|
|
*
|
|
|
|
* This function configures BW limit of queue scheduling node.
|
|
|
|
*/
|
|
|
|
enum ice_status
|
|
|
|
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
|
|
|
|
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
|
|
|
|
{
|
|
|
|
return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
|
|
|
|
bw);
|
|
|
|
}
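/* Hedged usage sketch (illustration only): capping the max (EIR) rate of one
 * queue at 500 Mbps on TC 0. 'pi', 'vsi_handle' and 'q_handle' are
 * hypothetical caller state; ice_cfg_q_bw_lmt() and ICE_MAX_BW come from this
 * driver, and the function takes the scheduler lock internally.
 */
static enum ice_status
ice_example_cap_queue_rate(struct ice_port_info *pi, u16 vsi_handle,
			   u16 q_handle)
{
	/* 500,000 Kbps == 500 Mbps */
	return ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle, ICE_MAX_BW,
				500000);
}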
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
|
|
|
|
* @pi: port information structure
|
|
|
|
* @vsi_handle: sw VSI handle
|
|
|
|
* @tc: traffic class
|
|
|
|
* @q_handle: software queue handle
|
|
|
|
* @rl_type: min, max, or shared
|
|
|
|
*
|
|
|
|
* This function configures BW default limit of queue scheduling node.
|
|
|
|
*/
|
|
|
|
enum ice_status
|
|
|
|
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
|
|
|
|
u16 q_handle, enum ice_rl_type rl_type)
|
|
|
|
{
|
|
|
|
return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
|
|
|
|
ICE_SCHED_DFLT_BW);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_rl_burst_size - Set burst size value
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @bytes: burst size in bytes
|
|
|
|
*
|
|
|
|
* This function configures/sets the burst size to the requested new value. The new
|
|
|
|
* burst size value is used for future rate limit calls. It doesn't change the
|
|
|
|
* existing or previously created RL profiles.
|
|
|
|
*/
|
|
|
|
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
|
|
|
|
{
|
|
|
|
u16 burst_size_to_prog;
|
|
|
|
|
|
|
|
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
|
|
|
|
bytes > ICE_MAX_BURST_SIZE_ALLOWED)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
if (ice_round_to_num(bytes, 64) <=
|
|
|
|
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
|
|
|
|
/* 64 byte granularity case */
|
|
|
|
/* Disable MSB granularity bit */
|
|
|
|
burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
|
|
|
|
/* round number to nearest 64 byte granularity */
|
|
|
|
bytes = ice_round_to_num(bytes, 64);
|
|
|
|
/* The value is in 64 byte chunks */
|
|
|
|
burst_size_to_prog |= (u16)(bytes / 64);
|
|
|
|
} else {
|
|
|
|
/* k bytes granularity case */
|
|
|
|
/* Enable MSB granularity bit */
|
|
|
|
burst_size_to_prog = ICE_KBYTE_GRANULARITY;
|
|
|
|
/* round number to nearest 1024 granularity */
|
|
|
|
bytes = ice_round_to_num(bytes, 1024);
|
|
|
|
/* check rounding doesn't go beyond allowed */
|
|
|
|
if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
|
|
|
|
bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
|
|
|
|
/* The value is in k bytes */
|
|
|
|
burst_size_to_prog |= (u16)(bytes / 1024);
|
|
|
|
}
|
|
|
|
hw->max_burst_size = burst_size_to_prog;
|
|
|
|
return 0;
|
|
|
|
}
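/* Worked example (illustration only): a request of 4096 bytes stays in 64 byte
 * granularity (assuming it is within that range's maximum) and programs
 * 4096 / 64 = 64 chunks with the granularity bit clear; a request that exceeds
 * the 64 byte granularity maximum is instead rounded to the nearest 1024 bytes
 * and programmed in KB chunks with the granularity bit set.
 */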
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_replay_node_prio - re-configure node priority
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @node: sched node to configure
|
|
|
|
* @priority: priority value
|
|
|
|
*
|
|
|
|
* This function configures node element's priority value. It
|
|
|
|
* needs to be called with scheduler lock held.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
|
|
|
|
u8 priority)
|
|
|
|
{
|
|
|
|
struct ice_aqc_txsched_elem_data buf;
|
|
|
|
struct ice_aqc_txsched_elem *data;
|
|
|
|
enum ice_status status;
|
|
|
|
|
|
|
|
buf = node->info;
|
|
|
|
data = &buf.data;
|
|
|
|
data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
|
|
|
|
data->generic = priority;
|
|
|
|
|
|
|
|
/* Configure element */
|
|
|
|
status = ice_sched_update_elem(hw, node, &buf);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_replay_node_bw - replay node(s) BW
|
|
|
|
* @hw: pointer to the HW struct
|
|
|
|
* @node: sched node to configure
|
|
|
|
* @bw_t_info: BW type information
|
|
|
|
*
|
|
|
|
* This function restores node's BW from bw_t_info. The caller needs
|
|
|
|
* to hold the scheduler lock.
|
|
|
|
*/
|
|
|
|
static enum ice_status
|
|
|
|
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
|
|
|
|
struct ice_bw_type_info *bw_t_info)
|
|
|
|
{
|
|
|
|
struct ice_port_info *pi = hw->port_info;
|
|
|
|
enum ice_status status = ICE_ERR_PARAM;
|
|
|
|
u16 bw_alloc;
|
|
|
|
|
|
|
|
if (!node)
|
|
|
|
return status;
|
|
|
|
if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
|
|
|
|
return 0;
|
|
|
|
if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
|
|
|
|
status = ice_sched_replay_node_prio(hw, node,
|
|
|
|
bw_t_info->generic);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
|
|
|
|
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
|
|
|
|
bw_t_info->cir_bw.bw);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
|
|
|
|
bw_alloc = bw_t_info->cir_bw.bw_alloc;
|
|
|
|
status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
|
|
|
|
bw_alloc);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
|
|
|
|
status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
|
|
|
|
bw_t_info->eir_bw.bw);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
|
|
|
|
bw_alloc = bw_t_info->eir_bw.bw_alloc;
|
|
|
|
status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
|
|
|
|
bw_alloc);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
|
|
|
|
status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
|
|
|
|
bw_t_info->shared_bw);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_sched_replay_q_bw - replay queue type node BW
|
|
|
|
* @pi: port information structure
|
|
|
|
* @q_ctx: queue context structure
|
|
|
|
*
|
|
|
|
* This function replays queue type node bandwidth. This function needs to be
|
|
|
|
* called with scheduler lock held.
|
|
|
|
*/
|
|
|
|
enum ice_status
|
|
|
|
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
|
|
|
|
{
|
|
|
|
struct ice_sched_node *q_node;
|
|
|
|
|
|
|
|
/* Following also checks the presence of node in tree */
|
|
|
|
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
|
|
|
|
if (!q_node)
|
|
|
|
return ICE_ERR_PARAM;
|
|
|
|
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
|
|
|
|
}
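/* Hedged usage sketch (illustration only): ice_sched_replay_q_bw() must be
 * called with the scheduler lock held, so a hypothetical caller replaying one
 * queue's BW after a reset could wrap it as below. 'pi' and 'q_ctx' are
 * hypothetical caller state.
 */
static enum ice_status
ice_example_replay_one_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
	enum ice_status status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_replay_q_bw(pi, q_ctx);
	mutex_unlock(&pi->sched_lock);
	return status;
}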
|