net: enetc: preserve TX ring priority across reconfiguration

In the blamed commit, a rudimentary reallocation procedure for RX buffer
descriptors was implemented for the situation when their format changes
between normal (no PTP) and extended (PTP).
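
For context, this is roughly what that reallocation decides (a hedged
sketch, not the driver's exact code; only union enetc_rx_bd is a real
identifier here):

    /* Sketch: with PTP RX timestamping on, each RX BD uses the
     * extended format, twice the size of a normal BD, so the RX
     * rings must be reallocated whenever the format toggles.
     */
    bd_size = extended ? sizeof(union enetc_rx_bd) * 2 :
                         sizeof(union enetc_rx_bd);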

enetc_hwtstamp_set() calls enetc_close() and enetc_open() in a sequence,
and this sequence loses information which was previously configured in
the TX BDR Mode Register, specifically via the enetc_set_bdr_prio() call.
The TX ring priority is configured by tc-mqprio and tc-taprio, and
affects important things for TSN such as the TX time of packets. The
issue manifests itself most visibly by the fact that isochron --txtime
reports premature packet transmissions when PTP is first enabled on an
enetc interface.
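
A hedged sketch of the sequence that loses the setting (simplified; the
real enetc_hwtstamp_set() does more, and the comments are editorial):

    if (netif_running(ndev)) {
            enetc_close(ndev); /* tears the rings down */
            enetc_open(ndev);  /* enetc_setup_txbdr() rewrites the
                                * TX BDR Mode Register, and before
                                * this fix always with priority 0 */
    }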

Save the TX ring priority in a new field in struct enetc_bdr (it fills a
2-byte hole on arm64) so that the priority survives a ring
reconfiguration.
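
Why the field is free: 'index' is a u16 immediately followed by an int
member, so alignment already left a 2-byte hole there. A pahole-style
sketch (offsets omitted; illustrative, not generated output):

    u16 index;    /* 2 bytes */
    u16 prio;     /* new: fills the former 2-byte alignment hole */
    int bd_count; /* # of BDs; 4-byte alignment caused the hole */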

Fixes: 434cebabd3 ("enetc: Add dynamic allocation of extended Rx BD rings")
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/r/20221122130936.1704151-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c

@@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
 	/* enable Tx ints by setting pkt thr to 1 */
 	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
 
-	tbmr = ENETC_TBMR_EN;
+	tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
 	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
 		tbmr |= ENETC_TBMR_VIH;
 
@@ -2461,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
 		/* Reset all ring priorities to 0 */
 		for (i = 0; i < priv->num_tx_rings; i++) {
 			tx_ring = priv->tx_ring[i];
-			enetc_set_bdr_prio(hw, tx_ring->index, 0);
+			tx_ring->prio = 0;
+			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
 		}
 
 		return 0;
@@ -2480,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
 	 */
 	for (i = 0; i < num_tc; i++) {
 		tx_ring = priv->tx_ring[i];
-		enetc_set_bdr_prio(hw, tx_ring->index, i);
+		tx_ring->prio = i;
+		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
 	}
 
 	/* Reset the number of netdev queues based on the TC count */

--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h

@@ -95,6 +95,7 @@ struct enetc_bdr {
 		void __iomem *rcir;
 	};
 	u16 index;
+	u16 prio;
 	int bd_count; /* # of BDs */
 	int next_to_use;
 	int next_to_clean;

--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c

@@ -137,6 +137,7 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
 	struct tc_taprio_qopt_offload *taprio = type_data;
 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 	struct enetc_hw *hw = &priv->si->hw;
+	struct enetc_bdr *tx_ring;
 	int err;
 	int i;
 
@@ -145,16 +146,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
 		if (priv->tx_ring[i]->tsd_enable)
 			return -EBUSY;
 
-	for (i = 0; i < priv->num_tx_rings; i++)
-		enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
-				   taprio->enable ? i : 0);
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		tx_ring = priv->tx_ring[i];
+		tx_ring->prio = taprio->enable ? i : 0;
+		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+	}
 
 	err = enetc_setup_taprio(ndev, taprio);
-
-	if (err)
-		for (i = 0; i < priv->num_tx_rings; i++)
-			enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
-					   taprio->enable ? 0 : i);
+	if (err) {
+		for (i = 0; i < priv->num_tx_rings; i++) {
+			tx_ring = priv->tx_ring[i];
+			tx_ring->prio = taprio->enable ? 0 : i;
+			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+		}
+	}
 
 	return err;
 }