tile: fix panic bug in napi support for tilegx network driver

The code used to call napi_disable() in an interrupt handler
(from smp_call_function), which in turn could call msleep().
Unfortunately you can't sleep in an interrupt context.

Luckily it turns out all the NAPI support functions are
just operating on data structures and not on any deeply
per-cpu data, so we can arrange to set up and tear down all
the NAPI state on the core driving the process, and just
do the IRQ enable/disable as a smp_call_function thing.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Chris Metcalf 2013-08-01 11:36:42 -04:00 committed by David S. Miller
parent ad0181855a
commit 5e7a54a2a7

View File

@@ -650,37 +650,13 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+/* Helper function for "tile_net_update()". */
+static void manage_ingress_irq(void *enable)
 {
-	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	struct net_device *dev = arg;
-
-	if (!info->has_iqueue)
-		return;
-
-	if (dev != NULL) {
-		if (!info->napi_added) {
-			netif_napi_add(dev, &info->napi,
-				       tile_net_poll, TILE_NET_WEIGHT);
-			info->napi_added = true;
-		}
-		if (!info->napi_enabled) {
-			napi_enable(&info->napi);
-			info->napi_enabled = true;
-		}
+	if (enable)
 		enable_percpu_irq(ingress_irq, 0);
-	} else {
+	else
 		disable_percpu_irq(ingress_irq);
-		if (info->napi_enabled) {
-			napi_disable(&info->napi);
-			info->napi_enabled = false;
-		}
-		/* FIXME: Drain the iqueue. */
-	}
 }
/* Helper function for tile_net_open() and tile_net_stop(). /* Helper function for tile_net_open() and tile_net_stop().
@@ -717,10 +693,35 @@ static int tile_net_update(struct net_device *dev)
 		return -EIO;
 	}
 
-	/* Update all cpus, sequentially (to protect "netif_napi_add()"). */
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, tile_net_update_cpu,
-					 (saw_channel ? dev : NULL), 1);
+	/* Update all cpus, sequentially (to protect "netif_napi_add()").
+	 * We use on_each_cpu to handle the IPI mask or unmask.
+	 */
+	if (!saw_channel)
+		on_each_cpu(manage_ingress_irq, (void *)0, 1);
+	for_each_online_cpu(cpu) {
+		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+		if (!info->has_iqueue)
+			continue;
+		if (saw_channel) {
+			if (!info->napi_added) {
+				netif_napi_add(dev, &info->napi,
+					       tile_net_poll, TILE_NET_WEIGHT);
+				info->napi_added = true;
+			}
+			if (!info->napi_enabled) {
+				napi_enable(&info->napi);
+				info->napi_enabled = true;
+			}
+		} else {
+			if (info->napi_enabled) {
+				napi_disable(&info->napi);
+				info->napi_enabled = false;
+			}
+			/* FIXME: Drain the iqueue. */
+		}
+	}
+	if (saw_channel)
+		on_each_cpu(manage_ingress_irq, (void *)1, 1);
 
 	/* HACK: Allow packets to flow in the simulator. */
 	if (saw_channel)