
staging: octeon: support enabling multiple rx groups

Support enabling multiple RX groups.

Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Aaro Koskinen, 2016-08-31 23:57:43 +03:00; committed by Greg Kroah-Hartman
parent 942bab48e6
commit e971a119f7
3 changed files with 77 additions and 49 deletions
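
Note: the heart of this change is that the driver's single receive-group state becomes a fixed 16-entry table, and a new pow_receive_groups bitmask decides which entries are actually wired up. Below is a minimal stand-alone C sketch of that gating pattern; it is user-space and illustrative only, BIT and ARRAY_SIZE are redefined locally rather than taken from kernel headers, and the struct is trimmed to two fields:

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Mirrors the driver's per-group table: one slot per receive group. */
static struct { int irq; int group; } oct_rx_group[16];

int main(void)
{
	unsigned long pow_receive_groups = BIT(15);	/* default: group 15 only */
	size_t i;

	/* Walk all slots, but touch only groups enabled in the mask. */
	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		oct_rx_group[i].group = (int)i;
		printf("group %zu enabled\n", i);
	}
	return 0;
}

Skipping disabled slots with continue keeps the loop shape identical across the poll, init, and shutdown paths in the diff below.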

drivers/staging/octeon/ethernet-rx.c

@@ -47,7 +47,7 @@ static struct oct_rx_group {
 	int irq;
 	int group;
 	struct napi_struct napi;
-} oct_rx_group;
+} oct_rx_group[16];
 
 /**
  * cvm_oct_do_interrupt - interrupt handler.
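
The single oct_rx_group instance becomes an array with one slot per work-queue group; 16 matches the group range the driver can service (groups 0-15, which is also why the module parameter defaults to 15). Which slots are live is controlled by the pow_receive_groups mask added in ethernet.c further down.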
@@ -442,7 +442,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
  */
 void cvm_oct_poll_controller(struct net_device *dev)
 {
-	cvm_oct_poll(&oct_rx_group, 16);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+		if (!(pow_receive_groups & BIT(i)))
+			continue;
+
+		cvm_oct_poll(&oct_rx_group[i], 16);
+	}
 }
 
 #endif
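
cvm_oct_poll_controller() is the netpoll hook, so with several groups enabled it must drain each of them rather than the single static instance; the mask-gated loop calls cvm_oct_poll() with the same 16-packet budget per group.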
@@ -461,65 +470,80 @@ void cvm_oct_rx_initialize(void)
 	if (!dev_for_napi)
 		panic("No net_devices were allocated.");
 
-	netif_napi_add(dev_for_napi, &oct_rx_group.napi, cvm_oct_napi_poll,
-		       rx_napi_weight);
-	napi_enable(&oct_rx_group.napi);
+	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+		int ret;
 
-	oct_rx_group.irq = OCTEON_IRQ_WORKQ0 + pow_receive_group;
-	oct_rx_group.group = pow_receive_group;
+		if (!(pow_receive_groups & BIT(i)))
+			continue;
 
-	/* Register an IRQ handler to receive POW interrupts */
-	i = request_irq(oct_rx_group.irq, cvm_oct_do_interrupt, 0, "Ethernet",
-			&oct_rx_group.napi);
+		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
+			       cvm_oct_napi_poll, rx_napi_weight);
+		napi_enable(&oct_rx_group[i].napi);
 
-	if (i)
-		panic("Could not acquire Ethernet IRQ %d\n", oct_rx_group.irq);
+		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+		oct_rx_group[i].group = i;
 
-	disable_irq_nosync(oct_rx_group.irq);
+		/* Register an IRQ handler to receive POW interrupts */
+		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+				  "Ethernet", &oct_rx_group[i].napi);
+		if (ret)
+			panic("Could not acquire Ethernet IRQ %d\n",
+			      oct_rx_group[i].irq);
 
-	/* Enable POW interrupt when our port has at least one packet */
-	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-		union cvmx_sso_wq_int_thrx int_thr;
-		union cvmx_pow_wq_int_pc int_pc;
+		disable_irq_nosync(oct_rx_group[i].irq);
 
-		int_thr.u64 = 0;
-		int_thr.s.tc_en = 1;
-		int_thr.s.tc_thr = 1;
-		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
-			       int_thr.u64);
+		/* Enable POW interrupt when our port has at least one packet */
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			union cvmx_sso_wq_int_thrx int_thr;
+			union cvmx_pow_wq_int_pc int_pc;
 
-		int_pc.u64 = 0;
-		int_pc.s.pc_thr = 5;
-		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
-	} else {
-		union cvmx_pow_wq_int_thrx int_thr;
-		union cvmx_pow_wq_int_pc int_pc;
+			int_thr.u64 = 0;
+			int_thr.s.tc_en = 1;
+			int_thr.s.tc_thr = 1;
+			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
 
-		int_thr.u64 = 0;
-		int_thr.s.tc_en = 1;
-		int_thr.s.tc_thr = 1;
-		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
-			       int_thr.u64);
+			int_pc.u64 = 0;
+			int_pc.s.pc_thr = 5;
+			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+		} else {
+			union cvmx_pow_wq_int_thrx int_thr;
+			union cvmx_pow_wq_int_pc int_pc;
 
-		int_pc.u64 = 0;
-		int_pc.s.pc_thr = 5;
-		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+			int_thr.u64 = 0;
+			int_thr.s.tc_en = 1;
+			int_thr.s.tc_thr = 1;
+			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
+
+			int_pc.u64 = 0;
+			int_pc.s.pc_thr = 5;
+			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+		}
+
+		/* Schedule NAPI now. This will indirectly enable the
+		 * interrupt.
+		 */
+		napi_schedule(&oct_rx_group[i].napi);
 	}
-
-	/* Schedule NAPI now. This will indirectly enable the interrupt. */
-	napi_schedule(&oct_rx_group.napi);
 }
 
 void cvm_oct_rx_shutdown(void)
 {
-	/* Disable POW interrupt */
-	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
-	else
-		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+	int i;
 
-	/* Free the interrupt handler */
-	free_irq(oct_rx_group.irq, cvm_oct_device);
+	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+		if (!(pow_receive_groups & BIT(i)))
+			continue;
 
-	netif_napi_del(&oct_rx_group.napi);
+		/* Disable POW interrupt */
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+		else
+			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+		/* Free the interrupt handler */
+		free_irq(oct_rx_group[i].irq, cvm_oct_device);
+
+		netif_napi_del(&oct_rx_group[i].napi);
+	}
 }
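
Two details in the converted init path: each enabled group now gets its own IRQ line (OCTEON_IRQ_WORKQ0 + i) and its own NAPI instance, and the request_irq() return value moves into a dedicated ret variable because i, which the old code reused for that purpose, is now the loop counter. The shutdown path mirrors the same mask-gated walk to disable the interrupt, free the IRQ, and delete the NAPI context for each group.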

drivers/staging/octeon/ethernet.c

@@ -45,7 +45,7 @@ MODULE_PARM_DESC(num_packet_buffers, "\n"
 		 "\tNumber of packet buffers to allocate and store in the\n"
 		 "\tFPA. By default, 1024 packet buffers are used.\n");
 
-int pow_receive_group = 15;
+static int pow_receive_group = 15;
 module_param(pow_receive_group, int, 0444);
 MODULE_PARM_DESC(pow_receive_group, "\n"
 		 "\tPOW group to receive packets from. All ethernet hardware\n"
@@ -86,6 +86,8 @@ int rx_napi_weight = 32;
 module_param(rx_napi_weight, int, 0444);
 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
 
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
+
 /*
  * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
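
These two hunks go together: pow_receive_group can lose external linkage (become static) because nothing outside ethernet.c reads it any more; the new pow_receive_groups mask declared here replaces it as the cross-file interface.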
@@ -678,6 +680,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
 
 	cvmx_helper_initialize_packet_io_global();
 
+	pow_receive_groups = BIT(pow_receive_group);
+
 	/* Change the input group for all ports before input is enabled */
 	num_interfaces = cvmx_helper_get_number_of_interfaces();
 	for (interface = 0; interface < num_interfaces; interface++) {
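
Seeding the mask from the existing module parameter keeps the default behaviour (one receive group) unchanged. A quick stand-alone check of what the default yields; illustrative only, with BIT redefined locally:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	int pow_receive_group = 15;	/* module parameter default */
	unsigned long pow_receive_groups = BIT(pow_receive_group);

	/* Prints 0x8000: only group 15 is enabled by default. */
	printf("pow_receive_groups = 0x%04lx\n", pow_receive_groups);
	return 0;
}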

drivers/staging/octeon/octeon-ethernet.h

@@ -72,7 +72,7 @@ void cvm_oct_link_poll(struct net_device *dev);
 
 extern int always_use_pow;
 extern int pow_send_group;
-extern int pow_receive_group;
+extern int pow_receive_groups;
 extern char pow_send_list[];
 extern struct net_device *cvm_oct_device[];
 extern atomic_t cvm_oct_poll_queue_stopping;