commit 2ddd6bfe7b
A CAN driver using the rx-offload infrastructure reads CAN frames (usually in IRQ context) from the hardware and places them into the rx-offload queue, to be delivered to the networking stack via NAPI. In case the rx-offload queue is full, trying to add more skbs results in the skbs being dropped using kfree_skb(). If done from hard-IRQ context, this results in the following warning:

[ 682.552693] ------------[ cut here ]------------
[ 682.557360] WARNING: CPU: 0 PID: 3057 at net/core/skbuff.c:650 skb_release_head_state+0x74/0x84
[ 682.566075] Modules linked in: can_raw can coda_vpu flexcan dw_hdmi_ahb_audio v4l2_jpeg imx_vdoa can_dev
[ 682.575597] CPU: 0 PID: 3057 Comm: cansend Tainted: G W 5.7.0+ #18
[ 682.583098] Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree)
[ 682.589657] [<c0112628>] (unwind_backtrace) from [<c010c1c4>] (show_stack+0x10/0x14)
[ 682.597423] [<c010c1c4>] (show_stack) from [<c06c481c>] (dump_stack+0xe0/0x114)
[ 682.604759] [<c06c481c>] (dump_stack) from [<c0128f10>] (__warn+0xc0/0x10c)
[ 682.611742] [<c0128f10>] (__warn) from [<c0129314>] (warn_slowpath_fmt+0x5c/0xc0)
[ 682.619248] [<c0129314>] (warn_slowpath_fmt) from [<c0b95dec>] (skb_release_head_state+0x74/0x84)
[ 682.628143] [<c0b95dec>] (skb_release_head_state) from [<c0b95e08>] (skb_release_all+0xc/0x24)
[ 682.636774] [<c0b95e08>] (skb_release_all) from [<c0b95eac>] (kfree_skb+0x74/0x1c8)
[ 682.644479] [<c0b95eac>] (kfree_skb) from [<bf001d1c>] (can_rx_offload_queue_sorted+0xe0/0xe8 [can_dev])
[ 682.654051] [<bf001d1c>] (can_rx_offload_queue_sorted [can_dev]) from [<bf001d6c>] (can_rx_offload_get_echo_skb+0x48/0x94 [can_dev])
[ 682.666007] [<bf001d6c>] (can_rx_offload_get_echo_skb [can_dev]) from [<bf01efe4>] (flexcan_irq+0x194/0x5dc [flexcan])
[ 682.676734] [<bf01efe4>] (flexcan_irq [flexcan]) from [<c019c1ec>] (__handle_irq_event_percpu+0x4c/0x3ec)
[ 682.686322] [<c019c1ec>] (__handle_irq_event_percpu) from [<c019c5b8>] (handle_irq_event_percpu+0x2c/0x88)
[ 682.695993] [<c019c5b8>] (handle_irq_event_percpu) from [<c019c64c>] (handle_irq_event+0x38/0x5c)
[ 682.704887] [<c019c64c>] (handle_irq_event) from [<c01a1058>] (handle_fasteoi_irq+0xc8/0x180)
[ 682.713432] [<c01a1058>] (handle_fasteoi_irq) from [<c019b2c0>] (generic_handle_irq+0x30/0x44)
[ 682.722063] [<c019b2c0>] (generic_handle_irq) from [<c019b8f8>] (__handle_domain_irq+0x64/0xdc)
[ 682.730783] [<c019b8f8>] (__handle_domain_irq) from [<c06df4a4>] (gic_handle_irq+0x48/0x9c)
[ 682.739158] [<c06df4a4>] (gic_handle_irq) from [<c0100b30>] (__irq_svc+0x70/0x98)
[ 682.746656] Exception stack(0xe80e9dd8 to 0xe80e9e20)
[ 682.751725] 9dc0:                                                    00000001 e80e8000
[ 682.759922] 9de0: e820cf80 00000000 ffffe000 00000000 eaf08fe4 00000000 600d0013 00000000
[ 682.768117] 9e00: c1732e3c c16093a8 e820d4c0 e80e9e28 c018a57c c018b870 600d0013 ffffffff
[ 682.776315] [<c0100b30>] (__irq_svc) from [<c018b870>] (lock_acquire+0x108/0x4e8)
[ 682.783821] [<c018b870>] (lock_acquire) from [<c0e938e4>] (down_write+0x48/0xa8)
[ 682.791242] [<c0e938e4>] (down_write) from [<c02818dc>] (unlink_file_vma+0x24/0x40)
[ 682.798922] [<c02818dc>] (unlink_file_vma) from [<c027a258>] (free_pgtables+0x34/0xb8)
[ 682.806858] [<c027a258>] (free_pgtables) from [<c02835a4>] (exit_mmap+0xe4/0x170)
[ 682.814361] [<c02835a4>] (exit_mmap) from [<c01248e0>] (mmput+0x5c/0x110)
[ 682.821171] [<c01248e0>] (mmput) from [<c012e910>] (do_exit+0x374/0xbe4)
[ 682.827892] [<c012e910>] (do_exit) from [<c0130888>] (do_group_exit+0x38/0xb4)
[ 682.835132] [<c0130888>] (do_group_exit) from [<c0130914>] (__wake_up_parent+0x0/0x14)
[ 682.843063] irq event stamp: 1936
[ 682.846399] hardirqs last enabled at (1935): [<c02938b0>] rmqueue+0xf4/0xc64
[ 682.853553] hardirqs last disabled at (1936): [<c0100b20>] __irq_svc+0x60/0x98
[ 682.860799] softirqs last enabled at (1878): [<bf04cdcc>] raw_release+0x108/0x1f0 [can_raw]
[ 682.869256] softirqs last disabled at (1876): [<c0b8f478>] release_sock+0x18/0x98
[ 682.876753] ---[ end trace 7bca4751ce44c444 ]---

This patch fixes the problem by replacing kfree_skb() with dev_kfree_skb_any(), as rx-offload might be called from threaded IRQ handlers as well.

Fixes: ca913f1ac0 ("can: rx-offload: can_rx_offload_queue_sorted(): fix error handling, avoid skb mem leak")
Fixes: 6caf8a6d65 ("can: rx-offload: can_rx_offload_queue_tail(): fix error handling, avoid skb mem leak")
Link: http://lore.kernel.org/r/20201019190524.1285319-3-mkl@pengutronix.de
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
377 lines · 9.0 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014      Protonic Holland,
 *                         David Jander
 * Copyright (C) 2014-2017 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into
 * an overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (unlikely(IS_ERR(skb))) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
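As a usage note: a driver adopting this infrastructure embeds a struct can_rx_offload in its private data, implements the mailbox_read() callback, registers via one of the can_rx_offload_add_*() variants, and calls the matching irq_offload helper from its interrupt handler; teardown goes through can_rx_offload_del(). A minimal sketch of the FIFO variant, where my_priv, my_mailbox_read, my_irq_handler, my_open, and the NAPI weight of 32 are hypothetical names for illustration, not part of this file:

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct can_rx_offload offload;	/* embedded rx-offload context */
};

/* Hypothetical mailbox_read() callback.  Contract (see
 * can_rx_offload_offload_one() above): return an skb holding the frame
 * from mailbox @n, NULL if the mailbox is empty, or an ERR_PTR() on
 * failure.  When @drop is true, free the hardware mailbox without
 * allocating an skb.
 */
static struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				       unsigned int n, u32 *timestamp,
				       bool drop)
{
	/* ... read hardware mailbox @n here ... */
	return NULL;			/* sketch: pretend it is empty */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct my_priv *priv = netdev_priv(ndev);

	/* Drain the RX FIFO into the offload queue; if anything was
	 * queued, this schedules NAPI, which delivers the skbs later.
	 */
	can_rx_offload_irq_offload_fifo(&priv->offload);

	return IRQ_HANDLED;
}

static int my_open(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);
	int err;

	priv->offload.mailbox_read = my_mailbox_read;
	err = can_rx_offload_add_fifo(ndev, &priv->offload, 32);
	if (err)
		return err;

	can_rx_offload_enable(&priv->offload);

	return 0;
}

Because mailbox_read() runs in (hard or threaded) IRQ context while NAPI delivery runs in softirq context, any skb that has to be dropped along this path must use dev_kfree_skb_any(), which is exactly what the commit above enforces at the queue-full drop sites.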