/*
 * Source provenance (scrape metadata, preserved as a comment):
 * mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-26 12:34:41 +08:00
 * Commit 13b5b7fd6a: "This change adds the defines and structures necessary to
 * support both Tx and Rx descriptor rings."
 * Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
 * Tested-by: Aaron Brown <aaron.f.brown@intel.com>
 * Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 * File: 84 lines, 2.1 KiB, C
 */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"

/**
 * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

	/* Only manageability-enabled hardware needs the flush workaround;
	 * without the management engine receiving TCO traffic there is no
	 * stale data in the fifo, so bail out early.
	 */
	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues, saving each queue's RXDCTL so it can be
	 * restored unchanged at the end of the workaround.
	 */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		/* OR together all four RXDCTLs; if any still has the enable
		 * bit set, keep waiting (up to ~10 ms total).
		 */
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		pr_debug("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	/* Save RLPML (max packet length) and force it to 0 so every frame
	 * is treated as oversized and dropped during the flush window.
	 */
	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	/* Two-step write: program the reject-all configuration first, then
	 * set EN, so the receiver comes up already rejecting everything.
	 * wrfl() posts the writes before the 2ms drain sleep.
	 */
	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround: these statistics
	 * registers are clear-on-read, so reading them discards the error
	 * counts the flush just produced.
	 */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}
|