Merge branch 'qede-netpoll-coalesce'

Bhaskar Upadhaya says:

====================
qede: add netpoll and per-queue coalesce support

This is a follow-up implementation to the series

https://patchwork.kernel.org/project/netdevbpf/cover/1610701570-29496-1-git-send-email-bupadhaya@marvell.com/

Patch 1: Add netpoll controller support so that kernel printks can be
         transmitted over UDP
Patch 2: QLogic cards support multiple queues, and each queue can be
         configured with its own coalescing parameters; this patch adds
         per-queue rx-usecs and tx-usecs coalescing parameters (see the
         usage sketch below)
Patch 3: Set default per-queue rx-usecs and tx-usecs coalescing
         parameters, and preserve the coalesce parameters across
         interface up and down

v3: fixed warnings reported by Dan Carpenter
v2: addressed comments from Jakub
 - p1: remove poll_controller ndo and add budget 0 support in qede_poll
 - p3: preserve coalesce parameters across interface up and down
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-02-11 14:25:06 -08:00
commit 6bda2f6fae
4 changed files with 171 additions and 5 deletions
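As background for the per-queue coalesce hooks added below, here is a minimal userspace sketch of how they are reached. It uses the standard ETHTOOL_PERQUEUE ioctl from include/uapi/linux/ethtool.h with the ETHTOOL_GCOALESCE sub-command (the same request that "ethtool --per-queue <dev> queue_mask 0x1 --show-coalesce" issues); error handling is trimmed and "eth0" is only an example interface name.

/* Userspace sketch: read the coalescing parameters of queue 0 via the
 * per-queue ethtool ioctl.  The kernel answers with one struct
 * ethtool_coalesce per queue selected in queue_mask, placed in data[].
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";	/* example name */
	struct ethtool_per_queue_op *op;
	struct ethtool_coalesce *coal;
	struct ifreq ifr = { 0 };
	int fd;

	/* header plus room for a single ethtool_coalesce reply (queue 0) */
	op = calloc(1, sizeof(*op) + sizeof(*coal));
	op->cmd = ETHTOOL_PERQUEUE;
	op->sub_command = ETHTOOL_GCOALESCE;
	op->queue_mask[0] = 0x1;			/* select queue 0 only */

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)op;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	coal = (struct ethtool_coalesce *)op->data;
	printf("queue 0: rx-usecs %u tx-usecs %u\n",
	       coal->rx_coalesce_usecs, coal->tx_coalesce_usecs);

	close(fd);
	free(op);
	return 0;
}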

drivers/net/ethernet/qlogic/qede/qede.h

@@ -168,6 +168,12 @@ struct qede_dump_info {
u32 args[QEDE_DUMP_MAX_ARGS];
};
struct qede_coalesce {
bool isvalid;
u16 rxc;
u16 txc;
};
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -194,6 +200,7 @@ struct qede_dev {
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
struct qede_coalesce *coal_entry;
u8 req_num_tx;
u8 fp_num_tx;
u8 req_num_rx;
@@ -581,6 +588,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f);
void qede_forced_speed_maps_init(void);
int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal);
int qede_set_per_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
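The coal_entry array introduced above runs parallel to fp_array, one struct qede_coalesce per queue, and isvalid marks queues whose coalescing values were explicitly configured and should therefore be re-applied on the next interface up. A minimal sketch of that bookkeeping (qede_coal_record is a hypothetical helper, not part of this series):

/* Hypothetical helper illustrating how coal_entry[] is meant to be used:
 * remember the last user-requested coalescing values for queue 'idx' so
 * that qede_load() can re-apply them after an interface down/up cycle.
 */
static void qede_coal_record(struct qede_dev *edev, int idx, u16 rxc, u16 txc)
{
	edev->coal_entry[idx].rxc = rxc;
	edev->coal_entry[idx].txc = txc;
	edev->coal_entry[idx].isvalid = true;	/* eligible for restore */
}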

drivers/net/ethernet/qlogic/qede/qede_ethtool.c

@@ -819,8 +819,7 @@ out:
return rc;
}
static int qede_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
@@ -855,6 +854,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set RX coalesce error, rc = %d\n", rc);
return rc;
}
edev->coal_entry[i].rxc = rxc;
edev->coal_entry[i].isvalid = true;
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -874,6 +875,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set TX coalesce error, rc = %d\n", rc);
return rc;
}
edev->coal_entry[i].txc = txc;
edev->coal_entry[i].isvalid = true;
}
}
@@ -2105,6 +2108,129 @@ err:
return rc;
}
int qede_set_per_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
u16 rxc, txc;
int rc = 0;
if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
DP_INFO(edev,
"Can't support requested %s coalesce value [max supported value %d]\n",
coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
: "tx",
QED_COALESCE_MAX);
return -EINVAL;
}
rxc = (u16)coal->rx_coalesce_usecs;
txc = (u16)coal->tx_coalesce_usecs;
__qede_lock(edev);
if (queue >= edev->num_queues) {
DP_INFO(edev, "Invalid queue\n");
rc = -EINVAL;
goto out;
}
if (edev->state != QEDE_STATE_OPEN) {
rc = -EINVAL;
goto out;
}
fp = &edev->fp_array[queue];
if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
rc = edev->ops->common->set_coalesce(edev->cdev,
rxc, 0,
fp->rxq->handle);
if (rc) {
DP_INFO(edev,
"Set RX coalesce error, rc = %d\n", rc);
goto out;
}
edev->coal_entry[queue].rxc = rxc;
edev->coal_entry[queue].isvalid = true;
}
if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
fp->txq->handle);
if (rc) {
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
goto out;
}
edev->coal_entry[queue].txc = txc;
edev->coal_entry[queue].isvalid = true;
}
out:
__qede_unlock(edev);
return rc;
}
static int qede_get_per_coalesce(struct net_device *dev,
u32 queue,
struct ethtool_coalesce *coal)
{
void *rx_handle = NULL, *tx_handle = NULL;
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
u16 rx_coal, tx_coal;
int rc = 0;
rx_coal = QED_DEFAULT_RX_USECS;
tx_coal = QED_DEFAULT_TX_USECS;
memset(coal, 0, sizeof(struct ethtool_coalesce));
__qede_lock(edev);
if (queue >= edev->num_queues) {
DP_INFO(edev, "Invalid queue\n");
rc = -EINVAL;
goto out;
}
if (edev->state != QEDE_STATE_OPEN) {
rc = -EINVAL;
goto out;
}
fp = &edev->fp_array[queue];
if (fp->type & QEDE_FASTPATH_RX)
rx_handle = fp->rxq->handle;
rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
rx_handle);
if (rc) {
DP_INFO(edev, "Read Rx coalesce error\n");
goto out;
}
fp = &edev->fp_array[queue];
if (fp->type & QEDE_FASTPATH_TX)
tx_handle = fp->txq->handle;
rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
tx_handle);
if (rc)
DP_INFO(edev, "Read Tx coalesce error\n");
out:
__qede_unlock(edev);
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
return rc;
}
static const struct ethtool_ops qede_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
@@ -2148,6 +2274,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_fecparam = qede_set_fecparam,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
.get_per_queue_coalesce = qede_get_per_coalesce,
.set_per_queue_coalesce = qede_set_per_coalesce,
.flash_device = qede_flash_device,
.get_dump_flag = qede_get_dump_flag,
.get_dump_data = qede_get_dump_data,
@@ -2177,6 +2305,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.get_per_queue_coalesce = qede_get_per_coalesce,
.set_per_queue_coalesce = qede_set_per_coalesce,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
};
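For reference, qede_set_per_coalesce() only ever sees a single queue index because the ethtool core fans a per-queue request out itself: it walks the user-supplied queue mask and invokes the driver callback once per selected queue. The sketch below is a simplified illustration of that dispatch, not qede code; the real core (net/ethtool/ioctl.c) additionally copies a struct ethtool_coalesce from userspace for each queue and rolls back on failure.

/* Simplified illustration of the ethtool core's per-queue dispatch:
 * one .set_per_queue_coalesce() call per bit set in the queue mask.
 */
#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_set_per_queue_coalesce(struct net_device *dev,
					  unsigned long *queue_mask,
					  struct ethtool_coalesce *coal)
{
	unsigned int bit;
	int ret;

	for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {
		ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, coal);
		if (ret)
			return ret;
	}

	return 0;
}

From the command line this path corresponds to something like "ethtool --per-queue ethX queue_mask 0x2 --coalesce rx-usecs 30 tx-usecs 60", while "--show-coalesce" exercises the matching qede_get_per_coalesce() path.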

drivers/net/ethernet/qlogic/qede/qede_fp.c

@@ -1450,7 +1450,8 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
/* Handle case where we are called by netpoll with a budget of 0 */
if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
napi_complete_done(napi, rx_work_done);
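For background on the hunk above: netpoll (used, for example, by netconsole to emit kernel printks over UDP) calls the driver's NAPI poll routine with a budget of 0, meaning TX completions may be cleaned but no RX work may be done or reported. The sketch below shows the general shape of a poll routine honouring that convention; the example_* types and helpers are placeholders, not qede code. Completing NAPI in that case is harmless because napi_complete_done() bails out on its own while netpoll service is in progress.

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_queue {
	struct napi_struct napi;
	/* ... hardware ring state ... */
};

static void example_clean_tx(struct example_queue *q);
static int example_clean_rx(struct example_queue *q, int budget);
static void example_enable_irq(struct example_queue *q);

/* Generic NAPI poll shape that tolerates netpoll's budget == 0 call:
 * TX cleanup always runs, RX processing is skipped entirely when the
 * budget is 0, and no RX work is ever reported in that case.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_queue *q = container_of(napi, struct example_queue, napi);
	int work_done = 0;

	example_clean_tx(q);			/* safe even when budget == 0 */

	if (budget)				/* netpoll passes budget == 0 */
		work_done = example_clean_rx(q, budget);

	/* Mirror the qede change: also allow completion for budget == 0;
	 * napi_complete_done() is a no-op under netpoll.
	 */
	if (work_done < budget || !budget) {
		if (napi_complete_done(napi, work_done))
			example_enable_irq(q);	/* re-arm the interrupt */
	}

	return work_done;
}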

drivers/net/ethernet/qlogic/qede/qede_main.c

@@ -904,6 +904,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
{
u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
void *mem;
int i;
edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
@@ -913,6 +914,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
sizeof(*edev->coal_entry), GFP_KERNEL);
if (!mem) {
DP_ERR(edev, "coalesce entry allocation failed\n");
kfree(edev->coal_entry);
goto err;
}
edev->coal_entry = mem;
fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
/* Allocate the FP elements for Rx queues followed by combined and then
@@ -1320,8 +1330,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
if (mode != QEDE_REMOVE_RECOVERY)
if (mode != QEDE_REMOVE_RECOVERY) {
kfree(edev->coal_entry);
free_netdev(ndev);
}
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -2328,8 +2340,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
struct ethtool_coalesce coal = {};
u8 num_tc;
int rc;
int rc, i;
DP_INFO(edev, "Starting qede load\n");
@@ -2390,6 +2403,18 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
edev->state = QEDE_STATE_OPEN;
coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
for_each_queue(i) {
if (edev->coal_entry[i].isvalid) {
coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
}
__qede_unlock(edev);
qede_set_per_coalesce(edev->ndev, i, &coal);
__qede_lock(edev);
}
DP_INFO(edev, "Ending successfully qede load\n");
goto out;