Merge branch 'move-est-lock-and-est-structure-to-struct-stmmac_priv'
Xiaolei Wang says:

====================
Move EST lock and EST structure to struct stmmac_priv

1. Pull the mutex protecting the EST structure out into struct stmmac_priv,
   so that it is no longer cleared when the EST structure is reinitialized
   (memset), and take that lock around the reinitialization itself.
2. Move the EST structure to a more logical location.
====================

Link: https://lore.kernel.org/r/20240513014346.1718740-1-xiaolei.wang@windriver.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b08191d860
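The first point is easiest to see outside the driver: when the mutex is embedded in the structure that is memset() on every taprio replace, the reinit wipes the lock state out from under any other path that uses it. The sketch below is a minimal userspace analogy of the pattern this series adopts (pthread mutex and invented names such as est_state, dev_priv and reconfigure; it is not the stmmac code itself): the lock lives in the long-lived private structure, and the clear of the EST state happens while holding it.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct est_state {                    /* analogous to struct stmmac_est  */
        int enable;
        unsigned int gcl_size;
};

struct dev_priv {                     /* analogous to struct stmmac_priv */
        pthread_mutex_t est_lock;     /* lock kept outside the data that
                                       * gets cleared on reconfiguration */
        struct est_state *est;
};

/* Reconfiguration path: the lock survives the memset() because it is not
 * part of the structure being cleared, and the clear itself is done under
 * the lock, mirroring the mutex_lock()/memset()/mutex_unlock() sequence
 * added in tc_taprio_configure() below.
 */
static int reconfigure(struct dev_priv *priv)
{
        if (!priv->est) {
                priv->est = calloc(1, sizeof(*priv->est));
                if (!priv->est)
                        return -1;
                pthread_mutex_init(&priv->est_lock, NULL);
        } else {
                pthread_mutex_lock(&priv->est_lock);
                memset(priv->est, 0, sizeof(*priv->est));
                pthread_mutex_unlock(&priv->est_lock);
        }
        return 0;
}

int main(void)
{
        struct dev_priv priv = { .est = NULL };

        reconfigure(&priv);   /* first call: allocate and init the lock  */
        reconfigure(&priv);   /* later calls: clear state under the lock */
        free(priv.est);
        return 0;
}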
@@ -221,6 +221,20 @@ struct stmmac_dma_conf {
         unsigned int dma_tx_size;
 };
 
+#define EST_GCL 1024
+struct stmmac_est {
+        int enable;
+        u32 btr_reserve[2];
+        u32 btr_offset[2];
+        u32 btr[2];
+        u32 ctr[2];
+        u32 ter;
+        u32 gcl_unaligned[EST_GCL];
+        u32 gcl[EST_GCL];
+        u32 gcl_size;
+        u32 max_sdu[MTL_MAX_TX_QUEUES];
+};
+
 struct stmmac_priv {
         /* Frequently used values are kept adjacent for cache effect */
         u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -261,6 +275,9 @@ struct stmmac_priv {
         struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
         struct stmmac_safety_stats sstats;
         struct plat_stmmacenet_data *plat;
+        /* Protect est parameters */
+        struct mutex est_lock;
+        struct stmmac_est *est;
         struct dma_features dma_cap;
         struct stmmac_counters mmc;
         int hw_cap_support;
@@ -2498,9 +2498,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
                         break;
 
-                if (priv->plat->est && priv->plat->est->enable &&
-                    priv->plat->est->max_sdu[queue] &&
-                    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+                if (priv->est && priv->est->enable &&
+                    priv->est->max_sdu[queue] &&
+                    xdp_desc.len > priv->est->max_sdu[queue]) {
                         priv->xstats.max_sdu_txq_drop[queue]++;
                         continue;
                 }
@@ -4538,9 +4538,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                         return stmmac_tso_xmit(skb, dev);
         }
 
-        if (priv->plat->est && priv->plat->est->enable &&
-            priv->plat->est->max_sdu[queue] &&
-            skb->len > priv->plat->est->max_sdu[queue]){
+        if (priv->est && priv->est->enable &&
+            priv->est->max_sdu[queue] &&
+            skb->len > priv->est->max_sdu[queue]){
                 priv->xstats.max_sdu_txq_drop[queue]++;
                 goto max_sdu_err;
         }
@@ -4919,9 +4919,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
                 return STMMAC_XDP_CONSUMED;
 
-        if (priv->plat->est && priv->plat->est->enable &&
-            priv->plat->est->max_sdu[queue] &&
-            xdpf->len > priv->plat->est->max_sdu[queue]) {
+        if (priv->est && priv->est->enable &&
+            priv->est->max_sdu[queue] &&
+            xdpf->len > priv->est->max_sdu[queue]) {
                 priv->xstats.max_sdu_txq_drop[queue]++;
                 return STMMAC_XDP_CONSUMED;
         }
@@ -68,13 +68,13 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
         nsec = reminder;
 
         /* If EST is enabled, disabled it before adjust ptp time. */
-        if (priv->plat->est && priv->plat->est->enable) {
+        if (priv->est && priv->est->enable) {
                 est_rst = true;
-                mutex_lock(&priv->plat->est->lock);
-                priv->plat->est->enable = false;
-                stmmac_est_configure(priv, priv, priv->plat->est,
+                mutex_lock(&priv->est_lock);
+                priv->est->enable = false;
+                stmmac_est_configure(priv, priv, priv->est,
                                      priv->plat->clk_ptp_rate);
-                mutex_unlock(&priv->plat->est->lock);
+                mutex_unlock(&priv->est_lock);
         }
 
         write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,24 +87,24 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
                 ktime_t current_time_ns, basetime;
                 u64 cycle_time;
 
-                mutex_lock(&priv->plat->est->lock);
+                mutex_lock(&priv->est_lock);
                 priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
                 current_time_ns = timespec64_to_ktime(current_time);
-                time.tv_nsec = priv->plat->est->btr_reserve[0];
-                time.tv_sec = priv->plat->est->btr_reserve[1];
+                time.tv_nsec = priv->est->btr_reserve[0];
+                time.tv_sec = priv->est->btr_reserve[1];
                 basetime = timespec64_to_ktime(time);
-                cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
-                             priv->plat->est->ctr[0];
+                cycle_time = (u64)priv->est->ctr[1] * NSEC_PER_SEC +
+                             priv->est->ctr[0];
                 time = stmmac_calc_tas_basetime(basetime,
                                                 current_time_ns,
                                                 cycle_time);
 
-                priv->plat->est->btr[0] = (u32)time.tv_nsec;
-                priv->plat->est->btr[1] = (u32)time.tv_sec;
-                priv->plat->est->enable = true;
-                ret = stmmac_est_configure(priv, priv, priv->plat->est,
+                priv->est->btr[0] = (u32)time.tv_nsec;
+                priv->est->btr[1] = (u32)time.tv_sec;
+                priv->est->enable = true;
+                ret = stmmac_est_configure(priv, priv, priv->est,
                                            priv->plat->clk_ptp_rate);
-                mutex_unlock(&priv->plat->est->lock);
+                mutex_unlock(&priv->est_lock);
                 if (ret)
                         netdev_err(priv->dev, "failed to configure EST\n");
         }
@@ -918,7 +918,6 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
 static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
                                      struct tc_taprio_qopt_offload *qopt)
 {
-        struct plat_stmmacenet_data *plat = priv->plat;
         u32 num_tc = qopt->mqprio.qopt.num_tc;
         u32 offset, count, i, j;
 
@@ -933,7 +932,7 @@ static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
                 count = qopt->mqprio.qopt.count[i];
 
                 for (j = offset; j < offset + count; j++)
-                        plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+                        priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
         }
 }
 
@@ -941,7 +940,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
                                struct tc_taprio_qopt_offload *qopt)
 {
         u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
-        struct plat_stmmacenet_data *plat = priv->plat;
         struct timespec64 time, current_time, qopt_time;
         ktime_t current_time_ns;
         bool fpe = false;
@@ -998,23 +996,25 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
         if (qopt->cycle_time_extension >= BIT(wid + 7))
                 return -ERANGE;
 
-        if (!plat->est) {
-                plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+        if (!priv->est) {
+                priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
                                          GFP_KERNEL);
-                if (!plat->est)
+                if (!priv->est)
                         return -ENOMEM;
 
-                mutex_init(&priv->plat->est->lock);
+                mutex_init(&priv->est_lock);
         } else {
-                memset(plat->est, 0, sizeof(*plat->est));
+                mutex_lock(&priv->est_lock);
+                memset(priv->est, 0, sizeof(*priv->est));
+                mutex_unlock(&priv->est_lock);
         }
 
         size = qopt->num_entries;
 
-        mutex_lock(&priv->plat->est->lock);
-        priv->plat->est->gcl_size = size;
-        priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
-        mutex_unlock(&priv->plat->est->lock);
+        mutex_lock(&priv->est_lock);
+        priv->est->gcl_size = size;
+        priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+        mutex_unlock(&priv->est_lock);
 
         for (i = 0; i < size; i++) {
                 s64 delta_ns = qopt->entries[i].interval;
@@ -1042,33 +1042,33 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
                         return -EOPNOTSUPP;
                 }
 
-                priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+                priv->est->gcl[i] = delta_ns | (gates << wid);
         }
 
-        mutex_lock(&priv->plat->est->lock);
+        mutex_lock(&priv->est_lock);
         /* Adjust for real system time */
         priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
         current_time_ns = timespec64_to_ktime(current_time);
         time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
                                         qopt->cycle_time);
 
-        priv->plat->est->btr[0] = (u32)time.tv_nsec;
-        priv->plat->est->btr[1] = (u32)time.tv_sec;
+        priv->est->btr[0] = (u32)time.tv_nsec;
+        priv->est->btr[1] = (u32)time.tv_sec;
 
         qopt_time = ktime_to_timespec64(qopt->base_time);
-        priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
-        priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+        priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+        priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
 
         ctr = qopt->cycle_time;
-        priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
-        priv->plat->est->ctr[1] = (u32)ctr;
+        priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+        priv->est->ctr[1] = (u32)ctr;
 
-        priv->plat->est->ter = qopt->cycle_time_extension;
+        priv->est->ter = qopt->cycle_time_extension;
 
         tc_taprio_map_maxsdu_txq(priv, qopt);
 
         if (fpe && !priv->dma_cap.fpesel) {
-                mutex_unlock(&priv->plat->est->lock);
+                mutex_unlock(&priv->est_lock);
                 return -EOPNOTSUPP;
         }
 
@@ -1077,9 +1077,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
          */
         priv->plat->fpe_cfg->enable = fpe;
 
-        ret = stmmac_est_configure(priv, priv, priv->plat->est,
+        ret = stmmac_est_configure(priv, priv, priv->est,
                                    priv->plat->clk_ptp_rate);
-        mutex_unlock(&priv->plat->est->lock);
+        mutex_unlock(&priv->est_lock);
         if (ret) {
                 netdev_err(priv->dev, "failed to configure EST\n");
                 goto disable;
@@ -1095,17 +1095,17 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
         return 0;
 
 disable:
-        if (priv->plat->est) {
-                mutex_lock(&priv->plat->est->lock);
-                priv->plat->est->enable = false;
-                stmmac_est_configure(priv, priv, priv->plat->est,
+        if (priv->est) {
+                mutex_lock(&priv->est_lock);
+                priv->est->enable = false;
+                stmmac_est_configure(priv, priv, priv->est,
                                      priv->plat->clk_ptp_rate);
                 /* Reset taprio status */
                 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
                         priv->xstats.max_sdu_txq_drop[i] = 0;
                         priv->xstats.mtl_est_txq_hlbf[i] = 0;
                 }
-                mutex_unlock(&priv->plat->est->lock);
+                mutex_unlock(&priv->est_lock);
         }
 
         priv->plat->fpe_cfg->enable = false;
@@ -115,21 +115,6 @@ struct stmmac_axi {
         bool axi_rb;
 };
 
-#define EST_GCL 1024
-struct stmmac_est {
-        struct mutex lock;
-        int enable;
-        u32 btr_reserve[2];
-        u32 btr_offset[2];
-        u32 btr[2];
-        u32 ctr[2];
-        u32 ter;
-        u32 gcl_unaligned[EST_GCL];
-        u32 gcl[EST_GCL];
-        u32 gcl_size;
-        u32 max_sdu[MTL_MAX_TX_QUEUES];
-};
-
 struct stmmac_rxq_cfg {
         u8 mode_to_use;
         u32 chan;
@@ -246,7 +231,6 @@ struct plat_stmmacenet_data {
         struct fwnode_handle *port_node;
         struct device_node *mdio_node;
         struct stmmac_dma_cfg *dma_cfg;
-        struct stmmac_est *est;
         struct stmmac_fpe_cfg *fpe_cfg;
         struct stmmac_safety_feature_cfg *safety_feat_cfg;
         int clk_csr;