Networking fixes for 6.0-rc8, including fixes from wifi and can.
Merge tag 'net-6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from wifi and can.

  Current release - regressions:

   - phy: don't WARN for PHY_UP state in mdio_bus_phy_resume()

   - wifi: fix locking in mac80211 mlme

   - eth:
      - revert "net: mvpp2: debugfs: fix memory leak when using debugfs_lookup()"
      - mlxbf_gige: fix an IS_ERR() vs NULL bug in mlxbf_gige_mdio_probe

  Previous releases - regressions:

   - wifi: fix regression with non-QoS drivers

  Previous releases - always broken:

   - mptcp: fix unreleased socket in accept queue

   - wifi:
      - don't start TX with fq->lock to fix deadlock
      - fix memory corruption in minstrel_ht_update_rates()

   - eth:
      - macb: fix ZynqMP SGMII non-wakeup source resume failure
      - mt7531: only do PLL once after the reset
      - usbnet: fix memory leak in usbnet_disconnect()

  Misc:

   - usb: qmi_wwan: add new usb-id for Dell branded EM7455"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (30 commits)
  mptcp: fix unreleased socket in accept queue
  mptcp: factor out __mptcp_close() without socket lock
  net: ethernet: mtk_eth_soc: fix mask of RX_DMA_GET_SPORT{,_V2}
  net: mscc: ocelot: fix tagged VLAN refusal while under a VLAN-unaware bridge
  can: c_can: don't cache TX messages for C_CAN cores
  ice: xsk: drop power of 2 ring size restriction for AF_XDP
  ice: xsk: change batched Tx descriptor cleaning
  net: usb: qmi_wwan: Add new usb-id for Dell branded EM7455
  selftests: Fix the if conditions of in test_extra_filter()
  net: phy: Don't WARN for PHY_UP state in mdio_bus_phy_resume()
  net: stmmac: power up/down serdes in stmmac_open/release
  wifi: mac80211: mlme: Fix double unlock on assoc success handling
  wifi: mac80211: mlme: Fix missing unlock on beacon RX
  wifi: mac80211: fix memory corruption in minstrel_ht_update_rates()
  wifi: mac80211: fix regression with non-QoS drivers
  wifi: mac80211: ensure vif queues are operational after start
  wifi: mac80211: don't start TX with fq->lock to fix deadlock
  wifi: cfg80211: fix MCS divisor value
  net: hippi: Add missing pci_disable_device() in rr_init_one()
  net/mlxbf_gige: Fix an IS_ERR() vs NULL bug in mlxbf_gige_mdio_probe
  ...
commit 511cce163b
MAINTAINERS
@@ -19961,7 +19961,7 @@ S:	Supported
 F:	drivers/net/team/
 F:	include/linux/if_team.h
 F:	include/uapi/linux/if_team.h
-F:	tools/testing/selftests/net/team/
+F:	tools/testing/selftests/drivers/net/team/
 
 TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
 M:	"Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
 	return ring->tail & (ring->obj_num - 1);
 }
 
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+				   const struct c_can_tx_ring *ring)
 {
-	return ring->obj_num - (ring->head - ring->tail);
+	u8 head = c_can_get_tx_head(ring);
+	u8 tail = c_can_get_tx_tail(ring);
+
+	if (priv->type == BOSCH_D_CAN)
+		return ring->obj_num - (ring->head - ring->tail);
+
+	/* This is not a FIFO. C/D_CAN sends out the buffers
+	 * prioritized. The lowest buffer number wins.
+	 */
+	if (head < tail)
+		return 0;
+
+	return ring->obj_num - head;
 }
 
 #endif /* C_CAN_H */
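For reference, a standalone sketch of the two free-space computations above (a hypothetical userspace rewrite for illustration only; OBJ_NUM and the example values are assumptions, not driver code). D_CAN message RAM behaves as a FIFO, so free space is capacity minus in-flight frames; C_CAN always transmits the lowest-numbered buffer first, so once the head index wraps behind the tail nothing may be queued until the ring drains.

#include <stdint.h>
#include <stdio.h>

#define OBJ_NUM 32 /* assumed number of TX buffers */

/* head and tail are free-running counters, as in the driver */
static uint8_t tx_free_d_can(uint8_t head, uint8_t tail)
{
	return OBJ_NUM - (head - tail); /* FIFO: capacity minus in-flight */
}

static uint8_t tx_free_c_can(uint8_t head, uint8_t tail)
{
	uint8_t h = head & (OBJ_NUM - 1);
	uint8_t t = tail & (OBJ_NUM - 1);

	if (h < t) /* head wrapped while older buffers are still pending */
		return 0;

	return OBJ_NUM - h; /* only buffers at or above the head are usable */
}

int main(void)
{
	/* head = 34, tail = 31: three frames in flight, head index wrapped to 2 */
	printf("D_CAN free: %u\n", tx_free_d_can(34, 31)); /* 29 */
	printf("C_CAN free: %u\n", tx_free_c_can(34, 31)); /* 0 */
	return 0;
}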
drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 static bool c_can_tx_busy(const struct c_can_priv *priv,
 			  const struct c_can_tx_ring *tx_ring)
 {
-	if (c_can_get_tx_free(tx_ring) > 0)
+	if (c_can_get_tx_free(priv, tx_ring) > 0)
 		return false;
 
 	netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
 	/* Memory barrier before checking tx_free (head and tail) */
 	smp_mb();
 
-	if (c_can_get_tx_free(tx_ring) == 0) {
+	if (c_can_get_tx_free(priv, tx_ring) == 0) {
 		netdev_dbg(priv->dev,
 			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
 			   tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 
 	idx = c_can_get_tx_head(tx_ring);
 	tx_ring->head++;
-	if (c_can_get_tx_free(tx_ring) == 0)
+	if (c_can_get_tx_free(priv, tx_ring) == 0)
 		netif_stop_queue(dev);
 
 	if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
 		return;
 
 	tx_ring->tail += pkts;
-	if (c_can_get_tx_free(tx_ring)) {
+	if (c_can_get_tx_free(priv, tx_ring)) {
 		/* Make sure that anybody stopping the queue after
 		 * this sees the new tx_ring->tail.
 		 */
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
 	stats->tx_packets += pkts;
 
 	tail = c_can_get_tx_tail(tx_ring);
-
-	if (tail == 0) {
+	if (priv->type == BOSCH_D_CAN && tail == 0) {
 		u8 head = c_can_get_tx_head(tx_ring);
 
 		/* Start transmission for all cached messages */
drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
 static int
 mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 {
-	struct mt7530_priv *priv = ds->priv;
+	return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
 	u32 top_sig;
 	u32 hwstrap;
 	u32 xtal;
 	u32 val;
 
 	if (mt7531_dual_sgmii_supported(priv))
-		return 0;
+		return;
 
 	val = mt7530_read(priv, MT7531_CREV);
 	top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 	val |= EN_COREPLL;
 	mt7530_write(priv, MT7531_PLLGP_EN, val);
 	usleep_range(25, 35);
-
-	return 0;
 }
 
 static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
 		return -ENODEV;
 	}
 
+	/* all MACs must be forced link-down before sw reset */
+	for (i = 0; i < MT7530_NUM_PORTS; i++)
+		mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
 	/* Reset the switch through internal reset */
 	mt7530_write(priv, MT7530_SYS_CTRL,
 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
 		     SYS_CTRL_REG_RST);
 
+	mt7531_pll_setup(priv);
+
 	if (mt7531_dual_sgmii_supported(priv)) {
 		priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
 
@@ -2887,8 +2896,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
 	case 6:
 		interface = PHY_INTERFACE_MODE_2500BASEX;
 
-		mt7531_pad_setup(ds, interface);
-
 		priv->p6_interface = interface;
 		break;
 	default:
drivers/net/ethernet/cadence/macb_main.c
@@ -5109,6 +5109,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	if (!(bp->wol & MACB_WOL_ENABLED)) {
 		rtnl_lock();
 		phylink_stop(bp->phylink);
+		phy_exit(bp->sgmii_phy);
 		rtnl_unlock();
 		spin_lock_irqsave(&bp->lock, flags);
 		macb_reset_hw(bp);
@@ -5198,6 +5199,9 @@ static int __maybe_unused macb_resume(struct device *dev)
 	macb_set_rx_mode(netdev);
 	macb_restore_features(bp);
 	rtnl_lock();
+	if (!device_may_wakeup(&bp->dev->dev))
+		phy_init(bp->sgmii_phy);
+
 	phylink_start(bp->phylink);
 	rtnl_unlock();
 
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
 #include "cudbg_entity.h"
 #include "cudbg_lib.h"
 #include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
 
 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
 	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < utxq->ntxq; i++)
 				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
 					      cudbg_uld_txq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 	}
 
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nrxq; i++)
 				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
 					      cudbg_uld_rxq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 
 		/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nrxq; i++)
 				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
 					      cudbg_uld_flq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 
 		/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nciq; i++)
 				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
 					      cudbg_uld_ciq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 	}
+	mutex_unlock(&uld_mutex);
+
+	if (!padap->tc_mqprio)
+		goto out;
 
+	mutex_lock(&padap->tc_mqprio->mqprio_mutex);
 	/* ETHOFLD TXQ */
 	if (s->eohw_txq)
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_TXQ(&s->eohw_txq[i].q,
-				      CUDBG_QTYPE_ETHOFLD_TXQ, out);
+				      CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
 
 	/* ETHOFLD RXQ and FLQ */
 	if (s->eohw_rxq) {
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
-				      CUDBG_QTYPE_ETHOFLD_RXQ, out);
+				      CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
 
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
-				      CUDBG_QTYPE_ETHOFLD_FLQ, out);
+				      CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
 	}
 
-out_unlock:
-	mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+	mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
 
 out:
 	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
 #undef QDESC_GET
 
 	return rc;
+
+out_unlock_uld:
+	mutex_unlock(&uld_mutex);
+	goto out;
 }
 
 int cudbg_collect_flash(struct cudbg_init *pdbg_init,
drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1467,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	bool wd;
 
 	if (tx_ring->xsk_pool)
-		wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+		wd = ice_xmit_zc(tx_ring);
 	else if (ice_ring_is_xdp(tx_ring))
 		wd = true;
 	else
drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -392,13 +392,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 		goto failure;
 	}
 
-	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
-	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
-		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
-		pool_failure = -EINVAL;
-		goto failure;
-	}
-
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
@@ -534,11 +527,10 @@ exit:
 bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 {
 	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
-	u16 batched, leftover, i, tail_bumps;
+	u16 leftover, i, tail_bumps;
 
-	batched = ALIGN_DOWN(count, rx_thresh);
-	tail_bumps = batched / rx_thresh;
-	leftover = count & (rx_thresh - 1);
+	tail_bumps = count / rx_thresh;
+	leftover = count - (tail_bumps * rx_thresh);
 
 	for (i = 0; i < tail_bumps; i++)
 		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
@@ -788,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 }
 
 /**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
  */
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
-	int budget = napi_budget / tx_thresh;
-	u16 next_dd = xdp_ring->next_dd;
-	u16 ntc, cleared_dds = 0;
+	u16 ntc = xdp_ring->next_to_clean;
+	struct ice_tx_desc *tx_desc;
+	u16 cnt = xdp_ring->count;
+	struct ice_tx_buf *tx_buf;
+	u16 xsk_frames = 0;
+	u16 last_rs;
+	int i;
 
-	do {
-		struct ice_tx_desc *next_dd_desc;
-		u16 desc_cnt = xdp_ring->count;
-		struct ice_tx_buf *tx_buf;
-		u32 xsk_frames;
-		u16 i;
+	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+	if ((tx_desc->cmd_type_offset_bsz &
+	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+		if (last_rs >= ntc)
+			xsk_frames = last_rs - ntc + 1;
+		else
+			xsk_frames = last_rs + cnt - ntc + 1;
+	}
 
-		next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
-		if (!(next_dd_desc->cmd_type_offset_bsz &
-		    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
-			break;
+	if (!xsk_frames)
+		return;
 
-		cleared_dds++;
-		xsk_frames = 0;
-		if (likely(!xdp_ring->xdp_tx_active)) {
-			xsk_frames = tx_thresh;
-			goto skip;
-		}
+	if (likely(!xdp_ring->xdp_tx_active))
+		goto skip;
 
-		ntc = xdp_ring->next_to_clean;
-
-		for (i = 0; i < tx_thresh; i++) {
-			tx_buf = &xdp_ring->tx_buf[ntc];
-
-			if (tx_buf->raw_buf) {
-				ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
-				tx_buf->raw_buf = NULL;
-			} else {
-				xsk_frames++;
-			}
-
-			ntc++;
-			if (ntc >= xdp_ring->count)
-				ntc = 0;
-		}
+	ntc = xdp_ring->next_to_clean;
+	for (i = 0; i < xsk_frames; i++) {
+		tx_buf = &xdp_ring->tx_buf[ntc];
+
+		if (tx_buf->raw_buf) {
+			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+			tx_buf->raw_buf = NULL;
+		} else {
+			xsk_frames++;
+		}
+
+		ntc++;
+		if (ntc >= xdp_ring->count)
+			ntc = 0;
	}
 skip:
-		xdp_ring->next_to_clean += tx_thresh;
-		if (xdp_ring->next_to_clean >= desc_cnt)
-			xdp_ring->next_to_clean -= desc_cnt;
-		if (xsk_frames)
-			xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
-		next_dd_desc->cmd_type_offset_bsz = 0;
-		next_dd = next_dd + tx_thresh;
-		if (next_dd >= desc_cnt)
-			next_dd = tx_thresh - 1;
-	} while (--budget);
-
-	xdp_ring->next_dd = next_dd;
-
-	return cleared_dds * tx_thresh;
+	tx_desc->cmd_type_offset_bsz = 0;
+	xdp_ring->next_to_clean += xsk_frames;
+	if (xdp_ring->next_to_clean >= cnt)
+		xdp_ring->next_to_clean -= cnt;
+	if (xsk_frames)
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 }
 
 /**
@@ -885,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
 static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 			       unsigned int *total_bytes)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u16 ntu = xdp_ring->next_to_use;
 	struct ice_tx_desc *tx_desc;
 	u32 i;
@@ -905,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 	}
 
 	xdp_ring->next_to_use = ntu;
-
-	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs += tx_thresh;
-	}
 }
 
 /**
@@ -924,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 				u32 nb_pkts, unsigned int *total_bytes)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u32 batched, leftover, i;
 
 	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -933,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
 	for (; i < batched + leftover; i++)
 		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
 
-	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
-		struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+	struct ice_tx_desc *tx_desc;
 
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs += tx_thresh;
-	}
+	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+	tx_desc->cmd_type_offset_bsz |=
+		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
 }
 
 /**
  * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
  *
  * Returns true if there is no more work that needs to be done, false otherwise
  */
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
 {
 	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u32 nb_pkts, nb_processed = 0;
 	unsigned int total_bytes = 0;
+	int budget;
+
+	ice_clean_xdp_irq_zc(xdp_ring);
 
-	if (budget < tx_thresh)
-		budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+	budget = ICE_DESC_UNUSED(xdp_ring);
+	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
 
 	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
 	if (!nb_pkts)
 		return true;
 
 	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
-		struct ice_tx_desc *tx_desc;
-
 		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
 		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs = tx_thresh - 1;
 		xdp_ring->next_to_use = 0;
 	}
 
 	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
 			    &total_bytes);
 
+	ice_set_rs_bit(xdp_ring);
 	ice_xdp_ring_update_tail(xdp_ring);
 	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
 
@@ -1058,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-	u16 count_mask = rx_ring->count - 1;
 	u16 ntc = rx_ring->next_to_clean;
 	u16 ntu = rx_ring->next_to_use;
 
-	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+	while (ntc != ntu) {
 		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
 
 		xsk_buff_free(xdp);
+		ntc++;
+		if (ntc >= rx_ring->count)
+			ntc = 0;
 	}
 }
 
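The rewritten ice_clean_xdp_irq_zc() above locates the descriptor that last carried the RS bit (one slot behind next_to_use) and, if hardware has marked it done, completes every frame between next_to_clean and that slot. A minimal standalone sketch of the wrap-around counting, with a hypothetical helper and assumed example values (not driver code):

#include <stdio.h>

/* number of completed frames in [ntc, last_rs] on a ring of cnt slots */
static unsigned int frames_done(unsigned int last_rs, unsigned int ntc,
				unsigned int cnt)
{
	if (last_rs >= ntc)
		return last_rs - ntc + 1;

	return last_rs + cnt - ntc + 1; /* the range wraps past slot cnt - 1 */
}

int main(void)
{
	printf("%u\n", frames_done(511, 500, 512)); /* 12: no wrap */
	printf("%u\n", frames_done(3, 500, 512));   /* 16: wrapped */
	return 0;
}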
drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,13 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
-	    u32 __always_unused budget,
-	    int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
 {
 	return false;
 }
drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
 
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
 {
-	static struct dentry *mvpp2_root;
-	struct dentry *mvpp2_dir;
+	struct dentry *mvpp2_dir, *mvpp2_root;
 	int ret, i;
 
+	mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
 	if (!mvpp2_root)
 		mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
 
drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -319,8 +319,8 @@
 #define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
 #define MTK_RXD5_SRC_PORT	GENMASK(29, 26)
 
-#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0x7)
+#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)
 
 /* PDMA V2 descriptor rxd3 */
 #define RX_DMA_VTAG_V2		BIT(0)
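The fix swaps the two masks so they match the field widths declared just above: the v1 source-port field is three bits wide (hence & 0x7), while MTK_RXD5_SRC_PORT is GENMASK(29, 26), four bits (hence & 0xf). A standalone sketch with an assumed descriptor value, illustration only:

#include <stdint.h>
#include <stdio.h>

#define RX_DMA_GET_SPORT(x)    (((x) >> 19) & 0x7) /* v1: bits 21:19 */
#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf) /* v2: bits 29:26 */

int main(void)
{
	uint32_t rxd5 = 9u << 26; /* assume hardware reported source port 9 */

	/* with the old 0x7 mask the top bit was lost: 9 & 0x7 == 1 */
	printf("v2 sport: %u\n", RX_DMA_GET_SPORT_V2(rxd5)); /* 9 */
	return 0;
}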
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -246,8 +246,8 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
 	}
 
 	priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
-	if (IS_ERR(priv->clk_io))
-		return PTR_ERR(priv->clk_io);
+	if (!priv->clk_io)
+		return -ENOMEM;
 
 	mlxbf_gige_mdio_cfg(priv);
 
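The bug class, for reference: devm_ioremap() reports failure with NULL, while devm_ioremap_resource() reports it with an ERR_PTR(), so an IS_ERR() check on the former is dead code and a NULL mapping would sail through. A minimal kernel-style sketch of the two conventions (illustrative helpers, not driver code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* devm_ioremap() signals failure with NULL */
static int map_with_devm_ioremap(struct device *dev, struct resource *res,
				 void __iomem **out)
{
	void __iomem *io = devm_ioremap(dev, res->start, resource_size(res));

	if (!io)
		return -ENOMEM;
	*out = io;
	return 0;
}

/* devm_ioremap_resource() signals failure with an ERR_PTR() */
static int map_with_devm_ioremap_resource(struct device *dev,
					  struct resource *res,
					  void __iomem **out)
{
	void __iomem *io = devm_ioremap_resource(dev, res);

	if (IS_ERR(io))
		return PTR_ERR(io);
	*out = io;
	return 0;
}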
drivers/net/ethernet/mscc/ocelot.c
@@ -290,6 +290,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
 		if (!(vlan->portmask & BIT(port)))
 			continue;
 
+		/* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+		 * because this is never active in hardware at the same time as
+		 * the bridge VLANs, which only matter in VLAN-aware mode.
+		 */
+		if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+			continue;
+
 		if (vlan->untagged & BIT(port))
 			num_untagged++;
 	}
drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.ev_test_generate = efx_ef10_ev_test_generate,
 	.filter_table_probe = efx_ef10_filter_table_probe,
 	.filter_table_restore = efx_mcdi_filter_table_restore,
-	.filter_table_remove = efx_mcdi_filter_table_remove,
+	.filter_table_remove = efx_ef10_filter_table_remove,
 	.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
 	.filter_insert = efx_mcdi_filter_insert,
 	.filter_remove_safe = efx_mcdi_filter_remove_safe,
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3801,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
 
 	stmmac_reset_queues_param(priv);
 
+	if (priv->plat->serdes_powerup) {
+		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+		if (ret < 0) {
+			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+				   __func__);
+			goto init_error;
+		}
+	}
+
 	ret = stmmac_hw_setup(dev, true);
 	if (ret < 0) {
 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3904,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
 	/* Disable the MAC Rx/Tx */
 	stmmac_mac_set(priv, priv->ioaddr, false);
 
+	/* Powerdown Serdes if there is */
+	if (priv->plat->serdes_powerdown)
+		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
 	netif_carrier_off(dev);
 
 	stmmac_release_ptp(priv);
@@ -7293,14 +7306,6 @@ int stmmac_dvr_probe(struct device *device,
 		goto error_netdev_register;
 	}
 
-	if (priv->plat->serdes_powerup) {
-		ret = priv->plat->serdes_powerup(ndev,
-						 priv->plat->bsp_priv);
-
-		if (ret < 0)
-			goto error_serdes_powerup;
-	}
-
 #ifdef CONFIG_DEBUG_FS
 	stmmac_init_fs(ndev);
 #endif
@@ -7315,8 +7320,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	return ret;
 
-error_serdes_powerup:
-	unregister_netdev(ndev);
 error_netdev_register:
 	phylink_destroy(priv->phylink);
 error_xpcs_setup:
drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_iounmap(pdev, rrpriv->regs);
 	if (pdev)
 		pci_release_regions(pdev);
+	pci_disable_device(pdev);
 out2:
 	free_netdev(dev);
 out3:
drivers/net/phy/phy_device.c
@@ -316,11 +316,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
 
 	phydev->suspended_by_mdio_bus = 0;
 
-	/* If we manged to get here with the PHY state machine in a state neither
-	 * PHY_HALTED nor PHY_READY this is an indication that something went wrong
-	 * and we should most likely be using MAC managed PM and we are not.
+	/* If we managed to get here with the PHY state machine in a state
+	 * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+	 * that something went wrong and we should most likely be using
+	 * MAC managed PM, but we are not.
 	 */
-	WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+	WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+		phydev->state != PHY_UP);
 
 	ret = phy_init_hw(phydev);
 	if (ret < 0)
drivers/net/tun.c
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		rcu_assign_pointer(tfile->tun, tun);
 	}
 
-	netif_carrier_on(tun->dev);
+	if (ifr->ifr_flags & IFF_NO_CARRIER)
+		netif_carrier_off(tun->dev);
+	else
+		netif_carrier_on(tun->dev);
 
 	/* Make sure persistent devices do not get stuck in
 	 * xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		 * This is needed because we never checked for invalid flags on
 		 * TUNSETIFF.
 		 */
-		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
-				(unsigned int __user*)argp);
+		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+				TUN_FEATURES, (unsigned int __user*)argp);
 	} else if (cmd == TUNSETQUEUE) {
 		return tun_set_queue(file, &ifr);
 	} else if (cmd == SIOCGSKNS) {
drivers/net/usb/qmi_wwan.c
@@ -1402,6 +1402,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81c2, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81cc, 8)},	/* Dell Wireless 5816e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
drivers/net/usb/usbnet.c
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
 	struct usbnet		*dev;
 	struct usb_device	*xdev;
 	struct net_device	*net;
+	struct urb		*urb;
 
 	dev = usb_get_intfdata(intf);
 	usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
 	net = dev->net;
 	unregister_netdev (net);
 
-	usb_scuttle_anchored_urbs(&dev->deferred);
+	while ((urb = usb_get_from_anchor(&dev->deferred))) {
+		dev_kfree_skb(urb->context);
+		kfree(urb->sg);
+		usb_free_urb(urb);
+	}
 
 	if (dev->driver_info->unbind)
 		dev->driver_info->unbind(dev, intf);
include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
 #define IFF_TAP		0x0002
 #define IFF_NAPI	0x0010
 #define IFF_NAPI_FRAGS	0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER	0x0040
 #define IFF_NO_PI	0x1000
 /* This flag has no real effect */
 #define IFF_ONE_QUEUE	0x2000
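A minimal userspace sketch of the new flag (assumes a kernel with IFF_NO_CARRIER support; the device name and the bare-bones error handling are illustrative only):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

#ifndef IFF_NO_CARRIER
#define IFF_NO_CARRIER 0x0040	/* value from the hunk above */
#endif

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI | IFF_NO_CARRIER;
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);

	/* the device comes up with carrier off; it can be toggled
	 * later with the existing TUNSETCARRIER ioctl
	 */
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return 1;
	}

	pause(); /* keep the interface alive until killed */
	return 0;
}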
net/mac80211/mlme.c
@@ -4040,7 +4040,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
 
 	if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
 	    (!elems->he_cap || !elems->he_operation)) {
-		mutex_unlock(&sdata->local->sta_mtx);
 		sdata_info(sdata,
 			   "HE AP is missing HE capability/operation\n");
 		ret = false;
@@ -5589,12 +5588,16 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
 
 	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
-	if (WARN_ON(!sta))
+	if (WARN_ON(!sta)) {
+		mutex_unlock(&local->sta_mtx);
 		goto free;
+	}
 	link_sta = rcu_dereference_protected(sta->link[link->link_id],
 					     lockdep_is_held(&local->sta_mtx));
-	if (WARN_ON(!link_sta))
+	if (WARN_ON(!link_sta)) {
+		mutex_unlock(&local->sta_mtx);
 		goto free;
+	}
 
 	changed |= ieee80211_recalc_twt_req(link, link_sta, elems);
 
net/mac80211/rc80211_minstrel_ht.c
@@ -10,6 +10,7 @@
 #include <linux/random.h>
 #include <linux/moduleparam.h>
 #include <linux/ieee80211.h>
+#include <linux/minmax.h>
 #include <net/mac80211.h>
 #include "rate.h"
 #include "sta_info.h"
@@ -1550,6 +1551,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 {
 	struct ieee80211_sta_rates *rates;
 	int i = 0;
+	int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);
 
 	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
 	if (!rates)
@@ -1559,10 +1561,10 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
 
 	/* Fill up remaining, keep one entry for max_probe_rate */
-	for (; i < (mp->hw->max_rates - 1); i++)
+	for (; i < (max_rates - 1); i++)
 		minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
 
-	if (i < mp->hw->max_rates)
+	if (i < max_rates)
 		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
 
 	if (i < IEEE80211_TX_RATE_TABLE_SIZE)
net/mac80211/status.c
@@ -729,7 +729,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 
 	if (!sdata) {
 		skb->dev = NULL;
-	} else {
+	} else if (!dropped) {
 		unsigned int hdr_size =
 			ieee80211_hdrlen(hdr->frame_control);
 
net/mac80211/tx.c
@@ -5878,6 +5878,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
 	skb_reset_network_header(skb);
 	skb_reset_mac_header(skb);
 
+	if (local->hw.queues < IEEE80211_NUM_ACS)
+		goto start_xmit;
+
 	/* update QoS header to prioritize control port frames if possible,
 	 * priorization also happens for control port frames send over
 	 * AF_PACKET
@@ -5905,6 +5908,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
 	}
 	rcu_read_unlock();
 
+start_xmit:
 	/* mutex lock is only needed for incrementing the cookie counter */
 	mutex_lock(&local->mtx);
 
net/mac80211/util.c
@@ -301,14 +301,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
 	local_bh_disable();
 	spin_lock(&fq->lock);
 
+	sdata->vif.txqs_stopped[ac] = false;
+
 	if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
 		goto out;
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP)
 		ps = &sdata->bss->ps;
 
-	sdata->vif.txqs_stopped[ac] = false;
-
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata)
 			continue;
net/mptcp/protocol.c
@@ -2662,7 +2662,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
 		dfrag_clear(sk, dfrag);
 }
 
-static void mptcp_cancel_work(struct sock *sk)
+void mptcp_cancel_work(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
@@ -2802,13 +2802,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sock_put(sk);
 }
 
-static void mptcp_close(struct sock *sk, long timeout)
+bool __mptcp_close(struct sock *sk, long timeout)
 {
 	struct mptcp_subflow_context *subflow;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	bool do_cancel_work = false;
 
-	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
@@ -2850,6 +2849,17 @@ cleanup:
 	} else {
 		mptcp_reset_timeout(msk, 0);
 	}
+
+	return do_cancel_work;
+}
+
+static void mptcp_close(struct sock *sk, long timeout)
+{
+	bool do_cancel_work;
+
+	lock_sock(sk);
+
+	do_cancel_work = __mptcp_close(sk, timeout);
 	release_sock(sk);
 	if (do_cancel_work)
 		mptcp_cancel_work(sk);
net/mptcp/protocol.h
@@ -612,6 +612,8 @@ void mptcp_subflow_reset(struct sock *ssk);
 void mptcp_subflow_queue_clean(struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+bool __mptcp_close(struct sock *sk, long timeout);
+void mptcp_cancel_work(struct sock *sk);
 
 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
 			   const struct mptcp_addr_info *b, bool use_port);
net/mptcp/subflow.c
@@ -602,30 +602,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
 	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
 }
 
-static void mptcp_sock_destruct(struct sock *sk)
-{
-	/* if new mptcp socket isn't accepted, it is free'd
-	 * from the tcp listener sockets request queue, linked
-	 * from req->sk. The tcp socket is released.
-	 * This calls the ULP release function which will
-	 * also remove the mptcp socket, via
-	 * sock_put(ctx->conn).
-	 *
-	 * Problem is that the mptcp socket will be in
-	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
-	 * Both result in warnings from inet_sock_destruct.
-	 */
-	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
-		sk->sk_state = TCP_CLOSE;
-		WARN_ON_ONCE(sk->sk_socket);
-		sock_orphan(sk);
-	}
-
-	/* We don't need to clear msk->subflow, as it's still NULL at this point */
-	mptcp_destroy_common(mptcp_sk(sk), 0);
-	inet_sock_destruct(sk);
-}
-
 static void mptcp_force_close(struct sock *sk)
 {
 	/* the msk is not yet exposed to user-space */
@@ -768,7 +744,6 @@ create_child:
 		/* new mpc subflow takes ownership of the newly
 		 * created mptcp socket
 		 */
-		new_msk->sk_destruct = mptcp_sock_destruct;
 		mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
 		mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
 		mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
@@ -1763,13 +1738,19 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
 
 	for (msk = head; msk; msk = next) {
 		struct sock *sk = (struct sock *)msk;
-		bool slow;
+		bool slow, do_cancel_work;
 
 		sock_hold(sk);
 		slow = lock_sock_fast_nested(sk);
 		next = msk->dl_next;
 		msk->first = NULL;
 		msk->dl_next = NULL;
+
+		do_cancel_work = __mptcp_close(sk, 0);
 		unlock_sock_fast(sk, slow);
+		if (do_cancel_work)
+			mptcp_cancel_work(sk);
 		sock_put(sk);
 	}
 
 	/* we are still under the listener msk socket lock */
net/sched/act_ct.c
@@ -1394,7 +1394,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
 
 	err = tcf_ct_flow_table_get(net, params);
 	if (err)
-		goto cleanup;
+		goto cleanup_params;
 
 	spin_lock_bh(&c->tcf_lock);
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -1409,6 +1409,9 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
 
 	return res;
 
+cleanup_params:
+	if (params->tmpl)
+		nf_ct_put(params->tmpl);
 cleanup:
 	if (goto_ch)
 		tcf_chain_put_by_act(goto_ch);
net/wireless/util.c
@@ -1361,7 +1361,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
 		 25599, /*  4.166666... */
 		 17067, /*  2.777777... */
 		 12801, /*  2.083333... */
-		 11769, /*  1.851851... */
+		 11377, /*  1.851725... */
 		 10239, /*  1.666666... */
 		  8532, /*  1.388888... */
 		  7680, /*  1.250000... */
@@ -1444,7 +1444,7 @@ static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate)
 		 25599, /*  4.166666... */
 		 17067, /*  2.777777... */
 		 12801, /*  2.083333... */
-		 11769, /*  1.851851... */
+		 11377, /*  1.851725... */
 		 10239, /*  1.666666... */
 		  8532, /*  1.388888... */
 		  7680, /*  1.250000... */
tools/testing/selftests/net/reuseport_bpf.c
@@ -328,7 +328,7 @@ static void test_extra_filter(const struct test_params p)
 	if (bind(fd1, addr, sockaddr_size()))
 		error(1, errno, "failed to bind recv socket 1");
 
-	if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE)
+	if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE)
 		error(1, errno, "bind socket 2 should fail with EADDRINUSE");
 
 	free(addr);
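The corrected condition errors out when the second bind() either unexpectedly succeeds or fails with the wrong errno. The old && form only fired when bind() succeeded while errno happened to differ from EADDRINUSE; a bind() that failed with some other error, or succeeded with a stale EADDRINUSE left in errno, slipped through silently. A tiny self-contained illustration of the expected-failure pattern (try_bind() is a hypothetical stand-in for the test's bind() call):

#include <errno.h>
#include <error.h>
#include <stdio.h>

/* behaves like bind(2): 0 on success, -1 with errno set on failure */
static int try_bind(int should_fail)
{
	if (should_fail) {
		errno = EADDRINUSE;
		return -1;
	}
	return 0;
}

int main(void)
{
	/* the call must fail, and specifically with EADDRINUSE */
	if (!try_bind(1) || errno != EADDRINUSE)
		error(1, errno, "bind should fail with EADDRINUSE");

	puts("got the expected EADDRINUSE");
	return 0;
}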