wifi: mt76: fix crash with WED rx support enabled

commit cd607f2cbb upstream.

If WED rx is enabled, rx buffers are added to a buffer pool that can be
filled from multiple page pools. Because buffers freed from rx poll are
not guaranteed to belong to the processed queue's page pool, lockless
caching must not be used in this case.
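
For illustration, a minimal sketch of the recycling step this is about, using the in-kernel page_pool API (the names data and allow_direct follow the diff below; this is a sketch of the semantics, not the patch itself):

	/* Returning a page-pool backed rx buffer.  allow_direct == true
	 * recycles into the owning pool's lockless per-NAPI cache, which
	 * is only safe when the buffer belongs to the pool serviced by
	 * the current NAPI context.  With WED rx the buffer may come from
	 * a different queue's pool, so allow_direct must be false and the
	 * locked ptr_ring path is taken instead.
	 */
	struct page *page = virt_to_head_page(data);

	page_pool_put_full_page(page->pp, page, allow_direct);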

Cc: stable@vger.kernel.org
Fixes: 2f5c3c77fc ("wifi: mt76: switch to page_pool allocator")
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20231208075004.69843-1-nbd@nbd.name
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -776,7 +776,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 
 static void
 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-		  int len, bool more, u32 info)
+		  int len, bool more, u32 info, bool allow_direct)
 {
 	struct sk_buff *skb = q->rx_head;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -788,7 +788,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		mt76_put_page_pool_buf(data, allow_direct);
 	}
 
 	if (more)
@@ -808,6 +808,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 	struct sk_buff *skb;
 	unsigned char *data;
 	bool check_ddone = false;
+	bool allow_direct = !mt76_queue_is_wed_rx(q);
 	bool more;
 
 	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
@@ -848,7 +849,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		}
 
 		if (q->rx_head) {
-			mt76_add_fragment(dev, q, data, len, more, info);
+			mt76_add_fragment(dev, q, data, len, more, info,
+					  allow_direct);
 			continue;
 		}
 
@@ -877,7 +879,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		mt76_put_page_pool_buf(data, allow_direct);
 	}
 
 	mt76_dma_rx_fill(dev, q, true);
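
For reference, the helpers the diff relies on live in drivers/net/wireless/mediatek/mt76/mt76.h. The sketch below shows roughly how they are defined around this kernel series (reproduced from memory, so treat the exact flag names as an approximation and verify against the tree being patched):

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}

static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(page->pp, page, allow_direct);
}

With allow_direct computed once per rx poll as !mt76_queue_is_wed_rx(q), WED rx queues always take the locked recycling path, while non-WED queues keep the cheaper direct recycling.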