mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-04 04:44:37 +08:00
can: c_can: don't cache TX messages for C_CAN cores
As Jacob noticed, the optimization introduced in commit 387da6bc7a
("can: c_can: cache frames to operate as a true FIFO") doesn't work properly on C_CAN IP cores, only on D_CAN ones. The exact reasons are still unknown. For now, disable caching of CAN frames in the TX path for C_CAN cores. Fixes: 387da6bc7a
("can: c_can: cache frames to operate as a true FIFO") Link: https://lore.kernel.org/all/20220928083354.1062321-1-mkl@pengutronix.de Link: https://lore.kernel.org/all/15a8084b-9617-2da1-6704-d7e39d60643b@gmail.com Reported-by: Jacob Kroon <jacob.kroon@gmail.com> Tested-by: Jacob Kroon <jacob.kroon@gmail.com> Cc: stable@vger.kernel.org # v5.15 Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
This commit is contained in:
parent
44d70bb561
commit
81d192c2ce
@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
|
||||
return ring->tail & (ring->obj_num - 1);
|
||||
}
|
||||
|
||||
static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
|
||||
static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
|
||||
const struct c_can_tx_ring *ring)
|
||||
{
|
||||
return ring->obj_num - (ring->head - ring->tail);
|
||||
u8 head = c_can_get_tx_head(ring);
|
||||
u8 tail = c_can_get_tx_tail(ring);
|
||||
|
||||
if (priv->type == BOSCH_D_CAN)
|
||||
return ring->obj_num - (ring->head - ring->tail);
|
||||
|
||||
/* This is not a FIFO. C/D_CAN sends out the buffers
|
||||
* prioritized. The lowest buffer number wins.
|
||||
*/
|
||||
if (head < tail)
|
||||
return 0;
|
||||
|
||||
return ring->obj_num - head;
|
||||
}
|
||||
|
||||
#endif /* C_CAN_H */
|
||||
|
@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
|
||||
static bool c_can_tx_busy(const struct c_can_priv *priv,
|
||||
const struct c_can_tx_ring *tx_ring)
|
||||
{
|
||||
if (c_can_get_tx_free(tx_ring) > 0)
|
||||
if (c_can_get_tx_free(priv, tx_ring) > 0)
|
||||
return false;
|
||||
|
||||
netif_stop_queue(priv->dev);
|
||||
@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
|
||||
/* Memory barrier before checking tx_free (head and tail) */
|
||||
smp_mb();
|
||||
|
||||
if (c_can_get_tx_free(tx_ring) == 0) {
|
||||
if (c_can_get_tx_free(priv, tx_ring) == 0) {
|
||||
netdev_dbg(priv->dev,
|
||||
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
|
||||
tx_ring->head, tx_ring->tail,
|
||||
@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
|
||||
|
||||
idx = c_can_get_tx_head(tx_ring);
|
||||
tx_ring->head++;
|
||||
if (c_can_get_tx_free(tx_ring) == 0)
|
||||
if (c_can_get_tx_free(priv, tx_ring) == 0)
|
||||
netif_stop_queue(dev);
|
||||
|
||||
if (idx < c_can_get_tx_tail(tx_ring))
|
||||
@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
|
||||
return;
|
||||
|
||||
tx_ring->tail += pkts;
|
||||
if (c_can_get_tx_free(tx_ring)) {
|
||||
if (c_can_get_tx_free(priv, tx_ring)) {
|
||||
/* Make sure that anybody stopping the queue after
|
||||
* this sees the new tx_ring->tail.
|
||||
*/
|
||||
@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
|
||||
stats->tx_packets += pkts;
|
||||
|
||||
tail = c_can_get_tx_tail(tx_ring);
|
||||
|
||||
if (tail == 0) {
|
||||
if (priv->type == BOSCH_D_CAN && tail == 0) {
|
||||
u8 head = c_can_get_tx_head(tx_ring);
|
||||
|
||||
/* Start transmission for all cached messages */
|
||||
|
Loading…
Reference in New Issue
Block a user