Merge tag 'net-6.2-rc5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from wireless, bluetooth, bpf and netfilter.

  Current release - regressions:

   - Revert "net: team: use IFF_NO_ADDRCONF flag to prevent ipv6
     addrconf", fix nsna_ping mode of team

   - wifi: mt76: fix bugs in Rx queue handling and DMA mapping

   - eth: mlx5:
      - add missing mutex_unlock in error reporter
      - protect global IPsec ASO with a lock

  Current release - new code bugs:

   - rxrpc: fix wrong error return in rxrpc_connect_call()

  Previous releases - regressions:

   - bluetooth: hci_sync: fix use of HCI_OP_LE_READ_BUFFER_SIZE_V2

   - wifi:
      - mac80211: fix crashes on Rx due to incorrect initialization of
        rx->link and rx->link_sta
      - mac80211: fix bugs in iTXQ conversion - Tx stalls, incorrect
        aggregation handling, crashes
      - brcmfmac: fix regression for Broadcom PCIe wifi devices
      - rndis_wlan: prevent buffer overflow in rndis_query_oid

   - netfilter: conntrack: handle tcp challenge acks during connection
     reuse

   - sched: avoid grafting on htb_destroy_class_offload when destroying

   - virtio-net: correctly enable callback during start_xmit, fix stalls

   - tcp: avoid the lookup process failing to get sk in ehash table

   - ipa: disable ipa interrupt during suspend

   - eth: stmmac: enable all safety features by default

  Previous releases - always broken:

   - bpf:
      - fix pointer-leak due to insufficient speculative store bypass
        mitigation (Spectre v4)
      - skip task with pid=1 in send_signal_common() to avoid a splat
      - fix BPF program ID information in BPF_AUDIT_UNLOAD as well as
        PERF_BPF_EVENT_PROG_UNLOAD events
      - fix potential deadlock in htab_lock_bucket from same bucket
        index but different map_locked index

   - bluetooth:
      - fix a buffer overflow in mgmt_mesh_add()
      - hci_qca: fix driver shutdown on closed serdev
      - ISO: fix possible circular locking dependency
      - CIS: hci_event: fix invalid wait context

   - wifi: brcmfmac: fixes for survey dump handling

   - mptcp: explicitly specify sock family at subflow creation time

   - netfilter: nft_payload: incorrect arithmetics when fetching VLAN
     header bits

   - tcp: fix rate_app_limited to default to 1

   - l2tp: close all race conditions in l2tp_tunnel_register()

   - eth: mlx5: fixes for QoS config and eswitch configuration

   - eth: enetc: avoid deadlock in enetc_tx_onestep_tstamp()

   - eth: stmmac: fix invalid call to mdiobus_get_phy()

  Misc:

   - ethtool: add netlink attr in rss get reply only if the value is
     not empty"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-6.2-rc5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
  Revert "Merge branch 'octeontx2-af-CPT'"
  tcp: fix rate_app_limited to default to 1
  bnxt: Do not read past the end of test names
  net: stmmac: enable all safety features by default
  octeontx2-af: add mbox to return CPT_AF_FLT_INT info
  octeontx2-af: update cpt lf alloc mailbox
  octeontx2-af: restore rxc conf after teardown sequence
  octeontx2-af: optimize cpt pf identification
  octeontx2-af: modify FLR sequence for CPT
  octeontx2-af: add mbox for CPT LF reset
  octeontx2-af: recover CPT engine when it gets fault
  net: dsa: microchip: ksz9477: port map correction in ALU table entry register
  selftests/net: toeplitz: fix race on tpacket_v3 block close
  net/ulp: use consistent error code when blocking ULP
  octeontx2-pf: Fix the use of GFP_KERNEL in atomic context on rt
  tcp: avoid the lookup process failing to get sk in ehash table
  Revert "net: team: use IFF_NO_ADDRCONF flag to prevent ipv6 addrconf"
  MAINTAINERS: add networking entries for Willem
  net: sched: gred: prevent races when adding offloads to stats
  l2tp: prevent lockdep issue in l2tp_tunnel_register()
  ...
This commit is contained in:
commit 5deaa98587

 MAINTAINERS | 21
@@ -1104,7 +1104,6 @@ S:	Supported
 F:	arch/arm64/boot/dts/amd/
 
 AMD XGBE DRIVER
-M:	Tom Lendacky <thomas.lendacky@amd.com>
 M:	"Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -15750,6 +15749,12 @@ S:	Maintained
 W:	https://wireless.wiki.kernel.org/en/users/Drivers/p54
 F:	drivers/net/wireless/intersil/p54/
 
+PACKET SOCKETS
+M:	Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:	Maintained
+F:	include/uapi/linux/if_packet.h
+F:	net/packet/af_packet.c
+
 PACKING
 M:	Vladimir Oltean <olteanv@gmail.com>
 L:	netdev@vger.kernel.org
@@ -19326,6 +19331,13 @@ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Orphan
 F:	sound/soc/uniphier/
 
+SOCKET TIMESTAMPING
+M:	Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:	Maintained
+F:	Documentation/networking/timestamping.rst
+F:	include/uapi/linux/net_tstamp.h
+F:	tools/testing/selftests/net/so_txtime.c
+
 SOEKRIS NET48XX LED SUPPORT
 M:	Chris Boot <bootc@bootc.net>
 S:	Maintained
@@ -21746,6 +21758,13 @@ T:	git git://linuxtv.org/media_tree.git
 F:	Documentation/admin-guide/media/zr364xx*
 F:	drivers/staging/media/deprecated/zr364xx/
 
+USER DATAGRAM PROTOCOL (UDP)
+M:	Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:	Maintained
+F:	include/linux/udp.h
+F:	net/ipv4/udp.c
+F:	net/ipv6/udp.c
+
 USER-MODE LINUX (UML)
 M:	Richard Weinberger <richard@nod.at>
 M:	Anton Ivanov <anton.ivanov@cambridgegreys.com>
@@ -2164,10 +2164,17 @@ static void qca_serdev_shutdown(struct device *dev)
 	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
 	struct serdev_device *serdev = to_serdev_device(dev);
 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+	struct hci_uart *hu = &qcadev->serdev_hu;
+	struct hci_dev *hdev = hu->hdev;
+	struct qca_data *qca = hu->priv;
 	const u8 ibs_wake_cmd[] = { 0xFD };
 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
 
 	if (qcadev->btsoc_type == QCA_QCA6390) {
+		if (test_bit(QCA_BT_OFF, &qca->flags) ||
+		    !test_bit(HCI_RUNNING, &hdev->flags))
+			return;
+
 		serdev_device_write_flush(serdev);
 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
 					      sizeof(ibs_wake_cmd));
@@ -540,10 +540,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
 	ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
 
 	/* clear forwarding port */
-	alu_table[2] &= ~BIT(port);
+	alu_table[1] &= ~BIT(port);
 
 	/* if there is no port to forward, clear table */
-	if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+	if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
 		alu_table[0] = 0;
 		alu_table[1] = 0;
 		alu_table[2] = 0;
@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
 	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
 }
 
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+	unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
+	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+		return max_q_count;
+	else
+		return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Clear MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
 	/* Clear MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
 	struct ieee_pfc *pfc = pdata->pfc;
 	struct ieee_ets *ets = pdata->ets;
-	unsigned int max_q_count, q_count;
 	unsigned int reg, reg_val;
-	unsigned int i;
+	unsigned int i, q_count;
 
 	/* Set MTL flow control */
 	for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 	}
 
 	/* Set MAC flow control */
-	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+	q_count = xgbe_get_fc_queue_count(pdata);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
 	reg |= XGBE_KR_TRAINING_ENABLE;
 	reg |= XGBE_KR_TRAINING_START;
 	XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+	pdata->kr_start_time = jiffies;
 
 	netif_dbg(pdata, link, pdata->netdev,
 		  "KR training initiated\n");
@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
 
 	xgbe_switch_mode(pdata);
 
+	pdata->an_result = XGBE_AN_READY;
+
 	xgbe_an_restart(pdata);
 
 	return XGBE_AN_INCOMPAT_LINK;
@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
 static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
 {
 	unsigned long link_timeout;
+	unsigned long kr_time;
+	int wait;
 
 	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
 	if (time_after(jiffies, link_timeout)) {
+		if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+		    pdata->phy.autoneg == AUTONEG_ENABLE) {
+			/* AN restart should not happen while KR training is in progress.
+			 * The while loop ensures no AN restart during KR training,
+			 * waits up to 500ms and AN restart is triggered only if KR
+			 * training is failed.
+			 */
+			wait = XGBE_KR_TRAINING_WAIT_ITER;
+			while (wait--) {
+				kr_time = pdata->kr_start_time +
+					  msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+				if (time_after(jiffies, kr_time))
+					break;
+				/* AN restart is not required, if AN result is COMPLETE */
+				if (pdata->an_result == XGBE_AN_COMPLETE)
+					return;
+				usleep_range(10000, 11000);
+			}
+		}
 		netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
 		xgbe_phy_config_aneg(pdata);
 	}
@@ -290,6 +290,7 @@
 /* Auto-negotiation */
 #define XGBE_AN_MS_TIMEOUT		500
 #define XGBE_LINK_TIMEOUT		5
+#define XGBE_KR_TRAINING_WAIT_ITER	50
 
 #define XGBE_SGMII_AN_LINK_STATUS	BIT(1)
 #define XGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
@@ -1280,6 +1281,7 @@ struct xgbe_prv_data {
 	unsigned int parallel_detect;
 	unsigned int fec_ability;
 	unsigned long an_start;
+	unsigned long kr_start_time;
 	enum xgbe_an_mode an_mode;
 
 	/* I2C support */
@@ -3969,7 +3969,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
 	test_info->timeout = HWRM_CMD_TIMEOUT;
 	for (i = 0; i < bp->num_tests; i++) {
 		char *str = test_info->string[i];
-		char *fw_str = resp->test0_name + i * 32;
+		char *fw_str = resp->test_name[i];
 
 		if (i == BNXT_MACLPBK_TEST_IDX) {
 			strcpy(str, "Mac loopback test (offline)");
@@ -3980,14 +3980,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
 		} else if (i == BNXT_IRQ_TEST_IDX) {
 			strcpy(str, "Interrupt_test (offline)");
 		} else {
-			strscpy(str, fw_str, ETH_GSTRING_LEN);
-			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
-			if (test_info->offline_mask & (1 << i))
-				strncat(str, " (offline)",
-					ETH_GSTRING_LEN - strlen(str));
-			else
-				strncat(str, " (online)",
-					ETH_GSTRING_LEN - strlen(str));
+			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
+				 fw_str, test_info->offline_mask & (1 << i) ?
+					 "offline" : "online");
 		}
 	}
 
@@ -10249,14 +10249,7 @@ struct hwrm_selftest_qlist_output {
 	u8	unused_0;
 	__le16	test_timeout;
 	u8	unused_1[2];
-	char	test0_name[32];
-	char	test1_name[32];
-	char	test2_name[32];
-	char	test3_name[32];
-	char	test4_name[32];
-	char	test5_name[32];
-	char	test6_name[32];
-	char	test7_name[32];
+	char	test_name[8][32];
 	u8	eyescope_target_BER_support;
 	#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
 	#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
@@ -2187,7 +2187,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
 	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
 		      skb_is_nonlinear(*skb);
 	int padlen = ETH_ZLEN - (*skb)->len;
-	int headroom = skb_headroom(*skb);
 	int tailroom = skb_tailroom(*skb);
 	struct sk_buff *nskb;
 	u32 fcs;
@@ -2201,9 +2200,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
 	/* FCS could be appeded to tailroom. */
 	if (tailroom >= ETH_FCS_LEN)
 		goto add_fcs;
-	/* FCS could be appeded by moving data to headroom. */
-	else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
-		padlen = 0;
 	/* No room for FCS, need to reallocate skb. */
 	else
 		padlen = ETH_FCS_LEN;
@@ -2212,10 +2208,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
 		padlen += ETH_FCS_LEN;
 	}
 
-	if (!cloned && headroom + tailroom >= padlen) {
-		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
-		skb_set_tail_pointer(*skb, (*skb)->len);
-	} else {
+	if (cloned || tailroom < padlen) {
 		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
 		if (!nskb)
 			return -ENOMEM;
@@ -2290,14 +2290,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
 
 	priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
 
-	netif_tx_lock(priv->ndev);
+	netif_tx_lock_bh(priv->ndev);
 
 	clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
 	skb = skb_dequeue(&priv->tx_skbs);
 	if (skb)
 		enetc_start_xmit(skb, priv->ndev);
 
-	netif_tx_unlock(priv->ndev);
+	netif_tx_unlock_bh(priv->ndev);
 }
 
 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
@@ -1012,7 +1012,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
 	rbpool = cq->rbpool;
 	free_ptrs = cq->pool_ptrs;
 
-	get_cpu();
 	while (cq->pool_ptrs) {
 		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
 			/* Schedule a WQ if we fails to free atleast half of the
@@ -1032,7 +1031,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
 		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
-	put_cpu();
 	cq->refill_task_sched = false;
 }
 
@@ -1370,7 +1368,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
 
-	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@@ -1394,7 +1391,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	}
 
 err_mem:
-	put_cpu();
 	return err ? -ENOMEM : 0;
 
 fail:
@@ -1435,21 +1431,18 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
 
-	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
 			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
 			if (err)
-				goto err_mem;
+				return -ENOMEM;
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
 						   bufptr + OTX2_HEAD_ROOM);
 		}
 	}
-err_mem:
-	put_cpu();
-	return err ? -ENOMEM : 0;
+	return 0;
 
 fail:
 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
 	otx2_aura_pool_free(pfvf);
@@ -736,8 +736,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
 	u64 ptrs[2];
 
 	ptrs[1] = buf;
+	get_cpu();
 	/* Free only one buffer at time during init and teardown */
 	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+	put_cpu();
 }
 
 /* Alloc pointer from pool/aura */
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
 		if (child->bw_share == old_bw_share)
 			continue;
 
-		err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+		err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
 					       child->max_average_bw, child->hw_id);
 		if (!err && err_one) {
 			err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
 	mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
 	mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
 
-	err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+	err = mlx5_qos_update_node(htb->mdev, bw_share,
 				   max_average_bw, node->hw_id);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
@@ -578,7 +578,6 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 {
 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 	u16 max_mtu_pkts;
 
 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
@@ -591,7 +590,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 	 * needed number of WQEs exceeds the maximum.
 	 */
 	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
-			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
+			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
 	if (params->log_rq_mtu_frames > max_mtu_pkts) {
 		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
 			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	struct mlx5e_sample_flow *sample_flow;
 	struct mlx5e_sample_attr *sample_attr;
 	struct mlx5_flow_attr *pre_attr;
-	u32 tunnel_id = attr->tunnel_id;
 	struct mlx5_eswitch *esw;
 	u32 default_tbl_id;
 	u32 obj_id;
@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	restore_obj.sample.group_id = sample_attr->group_num;
 	restore_obj.sample.rate = sample_attr->rate;
 	restore_obj.sample.trunc_size = sample_attr->trunc_size;
-	restore_obj.sample.tunnel_id = tunnel_id;
+	restore_obj.sample.tunnel_id = attr->tunnel_id;
 	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
 	if (err)
 		goto err_obj_id;
@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	/* For decap action, do decap in the original flow table instead of the
 	 * default flow table.
 	 */
-	if (tunnel_id)
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
 		pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
 	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
 	pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
@@ -122,11 +122,8 @@ struct mlx5e_ipsec_aso {
 	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
 	dma_addr_t dma_addr;
 	struct mlx5_aso *aso;
-	/* IPsec ASO caches data on every query call,
-	 * so in nested calls, we can use this boolean to save
-	 * recursive calls to mlx5e_ipsec_aso_query()
-	 */
-	u8 use_cache : 1;
+	/* Protect ASO WQ access, as it is global to whole IPsec */
+	spinlock_t lock;
 };
 
 struct mlx5e_ipsec {
@@ -320,7 +320,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 	if (ret)
 		goto unlock;
 
-	aso->use_cache = true;
 	if (attrs->esn_trigger &&
 	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
 		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
@@ -333,7 +332,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
 	    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
 	    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
 		xfrm_state_check_expire(sa_entry->x);
-	aso->use_cache = false;
 
 unlock:
 	spin_unlock(&sa_entry->x->lock);
@@ -398,6 +396,7 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
 		goto err_aso_create;
 	}
 
+	spin_lock_init(&aso->lock);
 	ipsec->nb.notifier_call = mlx5e_ipsec_event;
 	mlx5_notifier_register(mdev, &ipsec->nb);
 
@@ -456,13 +455,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
 	struct mlx5e_hw_objs *res;
 	struct mlx5_aso_wqe *wqe;
 	u8 ds_cnt;
+	int ret;
 
 	lockdep_assert_held(&sa_entry->x->lock);
-	if (aso->use_cache)
-		return 0;
-
 	res = &mdev->mlx5e_res.hw_objs;
 
+	spin_lock_bh(&aso->lock);
 	memset(aso->ctx, 0, sizeof(aso->ctx));
 	wqe = mlx5_aso_get_wqe(aso->aso);
 	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -477,7 +475,9 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
 	mlx5e_ipsec_aso_copy(ctrl, data);
 
 	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
-	return mlx5_aso_poll_cq(aso->aso, false);
+	ret = mlx5_aso_poll_cq(aso->aso, false);
+	spin_unlock_bh(&aso->lock);
+	return ret;
 }
 
 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -166,6 +166,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
  * it's different than the ht->mutex here.
  */
 static struct lock_class_key tc_ht_lock_key;
+static struct lock_class_key tc_ht_wq_key;
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
@@ -5182,6 +5183,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 		return err;
 
 	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
 	mapping_id = mlx5_query_nic_system_image_guid(dev);
 
@@ -5288,6 +5290,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
 		return err;
 
 	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
 	return 0;
 }
@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
 };
 
 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
-			       u32 parent_ix, u32 tsar_ix,
-			       u32 max_rate, u32 bw_share)
+			       u32 tsar_ix, u32 max_rate, u32 bw_share)
 {
 	u32 bitmask = 0;
 
 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
 		return -EOPNOTSUPP;
 
-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
 	int err;
 
 	err = esw_qos_tsar_config(dev, sched_ctx,
-				  esw->qos.root_tsar_ix, group->tsar_ix,
+				  group->tsar_ix,
 				  max_rate, bw_share);
 	if (err)
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
 				struct netlink_ext_ack *extack)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
-	struct mlx5_esw_rate_group *group = vport->qos.group;
 	struct mlx5_core_dev *dev = esw->dev;
-	u32 parent_tsar_ix;
-	void *vport_elem;
 	int err;
 
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
-	MLX5_SET(scheduling_context, sched_ctx, element_type,
-		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
-				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
-
-	err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
+	err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
 				  max_rate, bw_share);
 	if (err) {
 		esw_warn(esw->dev,
@@ -1464,6 +1464,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 	mlx5_lag_disable_change(esw->dev);
 	down_write(&esw->mode_lock);
 	mlx5_eswitch_disable_locked(esw);
+	esw->mode = MLX5_ESWITCH_LEGACY;
 	up_write(&esw->mode_lock);
 	mlx5_lag_enable_change(esw->dev);
 }
@@ -677,6 +677,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
 		mlx5_core_err(dev, "health works are not permitted at this stage\n");
+		mutex_unlock(&dev->intf_state_mutex);
 		return;
 	}
 	mutex_unlock(&dev->intf_state_mutex);
@@ -2098,7 +2098,7 @@ static void mlx5_core_verify_params(void)
 	}
 }
 
-static int __init init(void)
+static int __init mlx5_init(void)
 {
 	int err;
 
@@ -2133,7 +2133,7 @@ err_debug:
 	return err;
 }
 
-static void __exit cleanup(void)
+static void __exit mlx5_cleanup(void)
 {
 	mlx5e_cleanup();
 	mlx5_sf_driver_unregister();
@@ -2141,5 +2141,5 @@ static void __exit cleanup(void)
 	mlx5_unregister_debugfs();
 }
 
-module_init(init);
-module_exit(cleanup);
+module_init(mlx5_init);
+module_exit(mlx5_cleanup);
@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
 	return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
 }
 
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
 			 u32 bw_share, u32 max_avg_bw, u32 id)
 {
 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 	u32 bitmask = 0;
 
-	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
 
@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
 			       u32 bw_share, u32 max_avg_bw, u32 *id);
 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
 			 u32 max_avg_bw, u32 id);
 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
 
@@ -1043,11 +1043,6 @@ static int lan966x_probe(struct platform_device *pdev)
 		lan966x->base_mac[5] &= 0xf0;
 	}
 
-	ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
-	if (!ports)
-		return dev_err_probe(&pdev->dev, -ENODEV,
-				     "no ethernet-ports child found\n");
-
 	err = lan966x_create_targets(pdev, lan966x);
 	if (err)
 		return dev_err_probe(&pdev->dev, err,
@@ -1125,6 +1120,11 @@ static int lan966x_probe(struct platform_device *pdev)
 		}
 	}
 
+	ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+	if (!ports)
+		return dev_err_probe(&pdev->dev, -ENODEV,
+				     "no ethernet-ports child found\n");
+
 	/* init switch */
 	lan966x_init(lan966x);
 	lan966x_stats_init(lan966x);
@@ -1162,6 +1162,8 @@ static int lan966x_probe(struct platform_device *pdev)
 		goto cleanup_ports;
 	}
 
+	fwnode_handle_put(ports);
+
 	lan966x_mdb_init(lan966x);
 	err = lan966x_fdb_init(lan966x);
 	if (err)
@@ -1191,6 +1193,7 @@ cleanup_fdb:
 	lan966x_fdb_deinit(lan966x);
 
 cleanup_ports:
+	fwnode_handle_put(ports);
 	fwnode_handle_put(portnp);
 
 	lan966x_cleanup_ports(lan966x);
@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
 			      struct stmmac_safety_feature_cfg *safety_feat_cfg)
 {
+	struct stmmac_safety_feature_cfg all_safety_feats = {
+		.tsoee = 1,
+		.mrxpee = 1,
+		.mestee = 1,
+		.mrxee = 1,
+		.mtxee = 1,
+		.epsi = 1,
+		.edpp = 1,
+		.prtyen = 1,
+		.tmouten = 1,
+	};
 	u32 value;
 
 	if (!asp)
 		return -EINVAL;
 
+	if (!safety_feat_cfg)
+		safety_feat_cfg = &all_safety_feats;
+
 	/* 1. Enable Safety Features */
 	value = readl(ioaddr + MTL_ECC_CONTROL);
 	value |= MEEAO; /* MTL ECC Error Addr Status Override */
@@ -551,16 +551,16 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
 		p = (char *)priv + offsetof(struct stmmac_priv,
 					    xstats.txq_stats[q].tx_pkt_n);
 		for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
-			*data++ = (*(u64 *)p);
-			p += sizeof(u64 *);
+			*data++ = (*(unsigned long *)p);
+			p += sizeof(unsigned long);
 		}
 	}
 	for (q = 0; q < rx_cnt; q++) {
 		p = (char *)priv + offsetof(struct stmmac_priv,
 					    xstats.rxq_stats[q].rx_pkt_n);
 		for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
-			*data++ = (*(u64 *)p);
-			p += sizeof(u64 *);
+			*data++ = (*(unsigned long *)p);
+			p += sizeof(unsigned long);
 		}
 	}
 }
@@ -1150,6 +1150,11 @@ static int stmmac_init_phy(struct net_device *dev)
 		int addr = priv->plat->phy_addr;
 		struct phy_device *phydev;
 
+		if (addr < 0) {
+			netdev_err(priv->dev, "no phy found\n");
+			return -ENODEV;
+		}
+
 		phydev = mdiobus_get_phy(priv->mii, addr);
 		if (!phydev) {
 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
@@ -127,6 +127,16 @@ out_power_put:
 	return IRQ_HANDLED;
 }
 
+void ipa_interrupt_irq_disable(struct ipa *ipa)
+{
+	disable_irq(ipa->interrupt->irq);
+}
+
+void ipa_interrupt_irq_enable(struct ipa *ipa)
+{
+	enable_irq(ipa->interrupt->irq);
+}
+
 /* Common function used to enable/disable TX_SUSPEND for an endpoint */
 static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
 					  u32 endpoint_id, bool enable)
@@ -85,6 +85,22 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
  */
 void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
 
+/**
+ * ipa_interrupt_irq_enable() - Enable IPA interrupts
+ * @ipa:	IPA pointer
+ *
+ * This enables the IPA interrupt line
+ */
+void ipa_interrupt_irq_enable(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_irq_disable() - Disable IPA interrupts
+ * @ipa:	IPA pointer
+ *
+ * This disables the IPA interrupt line
+ */
+void ipa_interrupt_irq_disable(struct ipa *ipa);
+
 /**
  * ipa_interrupt_config() - Configure the IPA interrupt framework
  * @ipa:	IPA pointer
@@ -181,6 +181,17 @@ static int ipa_suspend(struct device *dev)
 
 	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
 
+	/* Increment the disable depth to ensure that the IRQ won't
+	 * be re-enabled until the matching _enable call in
+	 * ipa_resume(). We do this to ensure that the interrupt
+	 * handler won't run whilst PM runtime is disabled.
+	 *
+	 * Note that disabling the IRQ is NOT the same as disabling
+	 * irq wake. If wakeup is enabled for the IPA then the IRQ
+	 * will still cause the system to wake up, see irq_set_irq_wake().
+	 */
+	ipa_interrupt_irq_disable(ipa);
+
 	return pm_runtime_force_suspend(dev);
 }
 
@@ -193,6 +204,12 @@ static int ipa_resume(struct device *dev)
 
 	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
 
+	/* Now that PM runtime is enabled again it's safe
+	 * to turn the IRQ back on and process any data
+	 * that was received during suspend.
+	 */
+	ipa_interrupt_irq_enable(ipa);
+
 	return ret;
 }
 
@@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
 
 struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
 {
-	struct mdio_device *mdiodev = bus->mdio_map[addr];
+	struct mdio_device *mdiodev;
+
+	if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
+		return NULL;
+
+	mdiodev = bus->mdio_map[addr];
 
 	if (!mdiodev)
 		return NULL;
@@ -1044,7 +1044,6 @@ static int team_port_enter(struct team *team, struct team_port *port)
 			goto err_port_enter;
 		}
 	}
-	port->dev->priv_flags |= IFF_NO_ADDRCONF;
 
 	return 0;
 
@@ -1058,7 +1057,6 @@ static void team_port_leave(struct team *team, struct team_port *port)
 {
 	if (team->ops.port_leave)
 		team->ops.port_leave(team, port);
-	port->dev->priv_flags &= ~IFF_NO_ADDRCONF;
 	dev_put(team->dev);
 }
 
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		/* ignore the CRC length */
 		len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
-		if (len > ETH_FRAME_LEN || len > skb->len)
+		if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
 			return 0;
 
 		/* the last packet of current skb */
@@ -1877,8 +1877,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
-		if (!use_napi &&
-		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+		if (use_napi) {
+			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+				virtqueue_napi_schedule(&sq->napi, sq->vq);
+		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
 			free_old_xmit_skbs(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1243,9 +1243,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
 free_dev:
 	free_netdev(dev);
 undo_uhdlc_init:
-	iounmap(utdm->siram);
+	if (utdm)
+		iounmap(utdm->siram);
 unmap_si_regs:
-	iounmap(utdm->si_regs);
+	if (utdm)
+		iounmap(utdm->si_regs);
 free_utdm:
 	if (uhdlc_priv->tsa)
 		kfree(utdm);
@@ -7937,6 +7937,9 @@ cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 
+	if (chan->flags & IEEE80211_CHAN_DISABLED)
+		return -EINVAL;
+
 	/* set_channel */
 	chspec = channel_to_chanspec(&cfg->d11inf, chan);
 	if (chspec != INVCHANSPEC) {
@@ -7961,7 +7964,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
 	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 	struct brcmf_dump_survey survey = {};
 	struct ieee80211_supported_band *band;
-	struct ieee80211_channel *chan;
+	enum nl80211_band band_id;
 	struct cca_msrmnt_query req;
 	u32 noise;
 	int err;
@@ -7974,26 +7977,25 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
 		return -EBUSY;
 	}
 
-	band = wiphy->bands[NL80211_BAND_2GHZ];
-	if (band && idx >= band->n_channels) {
-		idx -= band->n_channels;
-		band = NULL;
-	}
+	for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) {
+		band = wiphy->bands[band_id];
+		if (!band)
+			continue;
+		if (idx >= band->n_channels) {
+			idx -= band->n_channels;
+			continue;
+		}
 
-	if (!band || idx >= band->n_channels) {
-		band = wiphy->bands[NL80211_BAND_5GHZ];
-		if (idx >= band->n_channels)
-			return -ENOENT;
+		info->channel = &band->channels[idx];
+		break;
 	}
+	if (band_id == NUM_NL80211_BANDS)
+		return -ENOENT;
 
 	/* Setting current channel to the requested channel */
-	chan = &band->channels[idx];
-	err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20);
-	if (err) {
-		info->channel = chan;
-		info->filled = 0;
+	info->filled = 0;
+	if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
 		return 0;
-	}
 
 	/* Disable mpc */
 	brcmf_set_mpc(ifp, 0);
@@ -8028,7 +8030,6 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
 	if (err)
 		goto exit;
 
-	info->channel = chan;
 	info->noise = noise;
 	info->time = ACS_MSRMNT_DELAY;
 	info->time_busy = ACS_MSRMNT_DELAY - survey.idle;
@@ -8040,7 +8041,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
 			 SURVEY_INFO_TIME_TX;
 
 	brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n",
-		  ieee80211_frequency_to_channel(chan->center_freq),
+		  ieee80211_frequency_to_channel(info->channel->center_freq),
 		  ACS_MSRMNT_DELAY);
 	brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n",
 		  info->noise, info->time_busy, info->time_rx, info->time_tx);
@@ -1228,7 +1228,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
 					  BRCMF_NROF_H2D_COMMON_MSGRINGS;
 		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
 	}
-	if (max_flowrings > 256) {
+	if (max_flowrings > 512) {
 		brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
 		return -EIO;
 	}
@@ -205,6 +205,52 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 	mt76_dma_sync_idx(dev, q);
 }
 
+static int
+mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		    struct mt76_queue_buf *buf, void *data)
+{
+	struct mt76_desc *desc = &q->desc[q->head];
+	struct mt76_queue_entry *entry = &q->entry[q->head];
+	struct mt76_txwi_cache *txwi = NULL;
+	u32 buf1 = 0, ctrl;
+	int idx = q->head;
+	int rx_token;
+
+	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+
+	if ((q->flags & MT_QFLAG_WED) &&
+	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+		txwi = mt76_get_rxwi(dev);
+		if (!txwi)
+			return -ENOMEM;
+
+		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+		if (rx_token < 0) {
+			mt76_put_rxwi(dev, txwi);
+			return -ENOMEM;
+		}
+
+		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+		ctrl |= MT_DMA_CTL_TO_HOST;
+	}
+
+	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
+	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+	WRITE_ONCE(desc->info, 0);
+
+	entry->dma_addr[0] = buf->addr;
+	entry->dma_len[0] = buf->len;
+	entry->txwi = txwi;
+	entry->buf = data;
+	entry->wcid = 0xffff;
+	entry->skip_buf1 = true;
+	q->head = (q->head + 1) % q->ndesc;
+	q->queued++;
+
+	return idx;
+}
+
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
@@ -212,65 +258,51 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
-	u32 ctrl;
 	int i, idx = -1;
+	u32 ctrl, next;
+
+	if (txwi) {
+		q->entry[q->head].txwi = DMA_DUMMY_DATA;
+		q->entry[q->head].skip_buf0 = true;
+	}
 
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
 		idx = q->head;
-		q->head = (q->head + 1) % q->ndesc;
+		next = (q->head + 1) % q->ndesc;
 
 		desc = &q->desc[idx];
 		entry = &q->entry[idx];
 
-		if ((q->flags & MT_QFLAG_WED) &&
-		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			struct mt76_txwi_cache *t = txwi;
-			int rx_token;
+		if (buf[0].skip_unmap)
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
 
-			if (!t)
-				return -ENOMEM;
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
 
-			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
-							 buf[0].addr);
-			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
-			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
-			       MT_DMA_CTL_TO_HOST;
-		} else {
-			if (txwi) {
-				q->entry[q->head].txwi = DMA_DUMMY_DATA;
-				q->entry[q->head].skip_buf0 = true;
-			}
-
-			if (buf[0].skip_unmap)
-				entry->skip_buf0 = true;
-			entry->skip_buf1 = i == nbufs - 1;
-
-			entry->dma_addr[0] = buf[0].addr;
-			entry->dma_len[0] = buf[0].len;
-
-			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
-			if (i < nbufs - 1) {
-				entry->dma_addr[1] = buf[1].addr;
-				entry->dma_len[1] = buf[1].len;
-				buf1 = buf[1].addr;
-				ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
-				if (buf[1].skip_unmap)
-					entry->skip_buf1 = true;
-			}
-
-			if (i == nbufs - 1)
-				ctrl |= MT_DMA_CTL_LAST_SEC0;
-			else if (i == nbufs - 2)
-				ctrl |= MT_DMA_CTL_LAST_SEC1;
-		}
+		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
+			buf1 = buf[1].addr;
+			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				entry->skip_buf1 = true;
+		}
+
+		if (i == nbufs - 1)
+			ctrl |= MT_DMA_CTL_LAST_SEC0;
+		else if (i == nbufs - 2)
+			ctrl |= MT_DMA_CTL_LAST_SEC1;
 
 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
 		WRITE_ONCE(desc->info, cpu_to_le32(info));
 		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 
+		q->head = next;
 		q->queued++;
 	}
 
@@ -577,17 +609,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
-		struct mt76_txwi_cache *t = NULL;
 		struct mt76_queue_buf qbuf;
 		void *buf = NULL;
 
-		if ((q->flags & MT_QFLAG_WED) &&
-		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-			t = mt76_get_rxwi(dev);
-			if (!t)
-				break;
-		}
-
 		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
@@ -601,7 +625,12 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		qbuf.addr = addr + offset;
 		qbuf.len = len - offset;
 		qbuf.skip_unmap = false;
-		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
+		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+			dma_unmap_single(dev->dma_dev, addr, len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf);
+			break;
+		}
 		frames++;
 	}
 
@@ -653,6 +653,13 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
 		desc->buf0 = cpu_to_le32(phy_addr);
 		token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+		if (token < 0) {
+			dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+					 wed->wlan.rx_size, DMA_TO_DEVICE);
+			skb_free_frag(ptr);
+			goto unmap;
+		}
+
 		desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
 						      token));
 		desc++;
@@ -764,11 +764,12 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 	spin_lock_bh(&dev->rx_token_lock);
 	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
 			  GFP_ATOMIC);
+	if (token >= 0) {
+		t->ptr = ptr;
+		t->dma_addr = phys;
+	}
 	spin_unlock_bh(&dev->rx_token_lock);
 
-	t->ptr = ptr;
-	t->dma_addr = phys;
-
 	return token;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 		struct rndis_query	*get;
 		struct rndis_query_c	*get_c;
 	} u;
-	int ret, buflen;
-	int resplen, respoffs, copylen;
+	int ret;
+	size_t buflen, resplen, respoffs, copylen;
 
 	buflen = *len + sizeof(*u.get);
 	if (buflen < CONTROL_BUFFER_SIZE)
@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 
 		if (respoffs > buflen) {
 			/* Device returned data offset outside buffer, error. */
-			netdev_dbg(dev->net, "%s(%s): received invalid "
-				   "data offset: %d > %d\n", __func__,
-				   oid_to_string(oid), respoffs, buflen);
+			netdev_dbg(dev->net,
+				   "%s(%s): received invalid data offset: %zu > %zu\n",
+				   __func__, oid_to_string(oid), respoffs, buflen);
 
 			ret = -EINVAL;
 			goto exit_unlock;
 		}
 
-		if ((resplen + respoffs) > buflen) {
-			/* Device would have returned more data if buffer would
-			 * have been big enough. Copy just the bits that we got.
-			 */
-			copylen = buflen - respoffs;
-		} else {
-			copylen = resplen;
-		}
+		copylen = min(resplen, buflen - respoffs);
 
 		if (copylen > *len)
 			copylen = *len;
@@ -1832,7 +1832,7 @@ void bpf_prog_inc(struct bpf_prog *prog);
 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_prog_free_id(struct bpf_prog *prog);
 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
@@ -1832,8 +1832,6 @@ struct ieee80211_vif_cfg {
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void \*).
  * @txq: the multicast data TX queue
- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
- *	protected by fq->lock.
  * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
  *	&enum ieee80211_offload_flags.
  * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
@@ -1863,8 +1861,6 @@ struct ieee80211_vif {
 	bool probe_req_reg;
 	bool rx_mcast_action_reg;
 
-	bool txqs_stopped[IEEE80211_NUM_ACS];
-
 	struct ieee80211_vif *mbssid_tx_vif;
 
 	/* must be last */
@@ -1288,4 +1288,11 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
 
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
+{
+	while (test_bit(__QDISC_STATE_SCHED, &q->state))
+		msleep(1);
+}
+
 #endif
@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 {
 	unsigned long flags;
 
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
+	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 
 	preempt_disable();
 	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 				      struct bucket *b, u32 hash,
 				      unsigned long flags)
 {
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
+	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
 	__this_cpu_dec(*(htab->map_locked[hash]));
 	preempt_enable();
@@ -216,9 +216,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 	if (offload->dev_state)
 		offload->offdev->ops->destroy(prog);
 
-	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
-	bpf_prog_free_id(prog, true);
-
 	list_del_init(&offload->offloads);
 	kfree(offload);
 	prog->aux->offload = NULL;
@@ -1972,7 +1972,7 @@ static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
 		return;
 	if (audit_enabled == AUDIT_OFF)
 		return;
-	if (op == BPF_AUDIT_LOAD)
+	if (!in_irq() && !irqs_disabled())
 		ctx = audit_context();
 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
 	if (unlikely(!ab))
@@ -2001,7 +2001,7 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
 	return id > 0 ? 0 : id;
 }
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog)
 {
 	unsigned long flags;
 
@@ -2013,18 +2013,10 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 	if (!prog->aux->id)
 		return;
 
-	if (do_idr_lock)
-		spin_lock_irqsave(&prog_idr_lock, flags);
-	else
-		__acquire(&prog_idr_lock);
-
+	spin_lock_irqsave(&prog_idr_lock, flags);
 	idr_remove(&prog_idr, prog->aux->id);
 	prog->aux->id = 0;
-
-	if (do_idr_lock)
-		spin_unlock_irqrestore(&prog_idr_lock, flags);
-	else
-		__release(&prog_idr_lock);
+	spin_unlock_irqrestore(&prog_idr_lock, flags);
 }
 
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
@@ -2067,17 +2059,15 @@ static void bpf_prog_put_deferred(struct work_struct *work)
 	prog = aux->prog;
 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
+	bpf_prog_free_id(prog);
 	__bpf_prog_put_noref(prog, true);
 }
 
-static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
+static void __bpf_prog_put(struct bpf_prog *prog)
 {
 	struct bpf_prog_aux *aux = prog->aux;
 
 	if (atomic64_dec_and_test(&aux->refcnt)) {
-		/* bpf_prog_free_id() must be called first */
-		bpf_prog_free_id(prog, do_idr_lock);
-
 		if (in_irq() || irqs_disabled()) {
 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
 			schedule_work(&aux->work);
@@ -2089,7 +2079,7 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 
 void bpf_prog_put(struct bpf_prog *prog)
 {
-	__bpf_prog_put(prog, true);
+	__bpf_prog_put(prog);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
 
@@ -2748,6 +2748,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 			 */
 			if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
 				return -ENOTSUPP;
+			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
+			 * catch this error later. Make backtracking conservative
+			 * with ENOTSUPP.
+			 */
+			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+				return -ENOTSUPP;
 			/* regular helper call sets R0 */
 			*reg_mask &= ~1;
 			if (*reg_mask & 0x3f) {
@@ -3289,7 +3295,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		bool sanitize = reg && is_spillable_regtype(reg->type);
 
 		for (i = 0; i < size; i++) {
-			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+			u8 type = state->stack[spi].slot_type[i];
+
+			if (type != STACK_MISC && type != STACK_ZERO) {
 				sanitize = true;
 				break;
 			}
@@ -848,6 +848,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		return -EPERM;
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
+	/* Task should not be pid=1 to avoid kernel panic. */
+	if (unlikely(is_global_init(current)))
+		return -EPERM;
 
 	if (irqs_disabled()) {
 		/* Do an early check on signal validity. Otherwise,
@@ -821,6 +821,7 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
 {
 	struct iso_list_data *d;
+	int ret;
 
 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
 
@@ -831,8 +832,12 @@ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
 	d->big = big;
 	d->bis = bis;
 
-	return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
-				  terminate_big_destroy);
+	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
+				 terminate_big_destroy);
+	if (ret)
+		kfree(d);
+
+	return ret;
 }
 
 static int big_terminate_sync(struct hci_dev *hdev, void *data)
@@ -857,6 +862,7 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
 {
 	struct iso_list_data *d;
+	int ret;
 
 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
 
@@ -867,8 +873,12 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
 	d->big = big;
 	d->sync_handle = sync_handle;
 
-	return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
-				  terminate_big_destroy);
+	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+				 terminate_big_destroy);
+	if (ret)
+		kfree(d);
+
+	return ret;
 }
 
 /* Cleanup BIS connection
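Both terminate helpers above follow the same ownership rule: hci_cmd_sync_queue() runs the destroy callback only for requests it actually queued, so on failure the allocation stays with the caller. A hedged sketch of the pattern, with hypothetical queue_sync_fn/queue_destroy_fn standing in for the real callbacks:

	/* Sketch only: free the request ourselves if queueing fails,
	 * because the destroy callback will never run for it.
	 */
	static int queue_or_free(struct hci_dev *hdev, void *data)
	{
		int ret;

		ret = hci_cmd_sync_queue(hdev, queue_sync_fn, data,
					 queue_destroy_fn);
		if (ret)
			kfree(data);

		return ret;
	}
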
@@ -3848,8 +3848,11 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
 			       conn->handle, conn->link);
 
 		/* Create CIS if LE is already connected */
-		if (conn->link && conn->link->state == BT_CONNECTED)
+		if (conn->link && conn->link->state == BT_CONNECTED) {
+			rcu_read_unlock();
 			hci_le_create_cis(conn->link);
+			rcu_read_lock();
+		}
 
 		if (i == rp->num_handles)
 			break;
@@ -3572,7 +3572,7 @@ static const struct hci_init_stage hci_init2[] = {
 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
 {
 	/* Use Read LE Buffer Size V2 if supported */
-	if (hdev->commands[41] & 0x20)
+	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
 		return __hci_cmd_sync_status(hdev,
 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
 					     0, NULL, HCI_CMD_TIMEOUT);
@@ -3597,10 +3597,10 @@ static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
 
 /* LE Controller init stage 2 command sequence */
 static const struct hci_init_stage le_init2[] = {
-	/* HCI_OP_LE_READ_BUFFER_SIZE */
-	HCI_INIT(hci_le_read_buffer_size_sync),
 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
 	HCI_INIT(hci_le_read_local_features_sync),
+	/* HCI_OP_LE_READ_BUFFER_SIZE */
+	HCI_INIT(hci_le_read_buffer_size_sync),
 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
 	HCI_INIT(hci_le_read_supported_states_sync),
 	{}
@@ -6187,20 +6187,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
 
 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
 {
-	u8 instance = *(u8 *)data;
-
-	kfree(data);
+	u8 instance = PTR_ERR(data);
 
 	return hci_update_adv_data_sync(hdev, instance);
 }
 
 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
 {
-	u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
-
-	if (!inst_ptr)
-		return -ENOMEM;
-
-	*inst_ptr = instance;
-	return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
+	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
+				  ERR_PTR(instance), NULL);
 }
 
@@ -289,15 +289,15 @@ static int iso_connect_bis(struct sock *sk)
 	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
+	err = iso_chan_add(conn, sk, NULL);
+	if (err)
+		return err;
+
 	lock_sock(sk);
 
 	/* Update source addr of the socket */
 	bacpy(&iso_pi(sk)->src, &hcon->src);
 
-	err = iso_chan_add(conn, sk, NULL);
-	if (err)
-		goto release;
-
 	if (hcon->state == BT_CONNECTED) {
 		iso_sock_clear_timer(sk);
 		sk->sk_state = BT_CONNECTED;
@@ -306,7 +306,6 @@ static int iso_connect_bis(struct sock *sk)
 		iso_sock_set_timer(sk, sk->sk_sndtimeo);
 	}
 
-release:
 	release_sock(sk);
 	return err;
 
@@ -372,15 +371,15 @@ static int iso_connect_cis(struct sock *sk)
 	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
+	err = iso_chan_add(conn, sk, NULL);
+	if (err)
+		return err;
+
 	lock_sock(sk);
 
 	/* Update source addr of the socket */
 	bacpy(&iso_pi(sk)->src, &hcon->src);
 
-	err = iso_chan_add(conn, sk, NULL);
-	if (err)
-		goto release;
-
 	if (hcon->state == BT_CONNECTED) {
 		iso_sock_clear_timer(sk);
 		sk->sk_state = BT_CONNECTED;
@@ -392,7 +391,6 @@ static int iso_connect_cis(struct sock *sk)
 		iso_sock_set_timer(sk, sk->sk_sndtimeo);
 	}
 
-release:
 	release_sock(sk);
 	return err;
 
@@ -895,13 +893,10 @@ static int iso_listen_bis(struct sock *sk)
 	if (!hdev)
 		return -EHOSTUNREACH;
 
-	hci_dev_lock(hdev);
-
 	err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
 				 le_addr_type(iso_pi(sk)->dst_type),
 				 iso_pi(sk)->bc_sid);
 
-	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1432,33 +1427,29 @@ static void iso_conn_ready(struct iso_conn *conn)
 	struct sock *parent;
 	struct sock *sk = conn->sk;
 	struct hci_ev_le_big_sync_estabilished *ev;
+	struct hci_conn *hcon;
 
 	BT_DBG("conn %p", conn);
 
 	if (sk) {
 		iso_sock_ready(conn->sk);
 	} else {
-		iso_conn_lock(conn);
-
-		if (!conn->hcon) {
-			iso_conn_unlock(conn);
+		hcon = conn->hcon;
+		if (!hcon)
 			return;
-		}
 
-		ev = hci_recv_event_data(conn->hcon->hdev,
+		ev = hci_recv_event_data(hcon->hdev,
 					 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
 		if (ev)
-			parent = iso_get_sock_listen(&conn->hcon->src,
-						     &conn->hcon->dst,
+			parent = iso_get_sock_listen(&hcon->src,
+						     &hcon->dst,
 						     iso_match_big, ev);
 		else
-			parent = iso_get_sock_listen(&conn->hcon->src,
+			parent = iso_get_sock_listen(&hcon->src,
 						     BDADDR_ANY, NULL, NULL);
 
-		if (!parent) {
-			iso_conn_unlock(conn);
+		if (!parent)
 			return;
-		}
 
 		lock_sock(parent);
 
@@ -1466,30 +1457,29 @@ static void iso_conn_ready(struct iso_conn *conn)
 				    BTPROTO_ISO, GFP_ATOMIC, 0);
 		if (!sk) {
 			release_sock(parent);
-			iso_conn_unlock(conn);
 			return;
 		}
 
 		iso_sock_init(sk, parent);
 
-		bacpy(&iso_pi(sk)->src, &conn->hcon->src);
-		iso_pi(sk)->src_type = conn->hcon->src_type;
+		bacpy(&iso_pi(sk)->src, &hcon->src);
+		iso_pi(sk)->src_type = hcon->src_type;
 
 		/* If hcon has no destination address (BDADDR_ANY) it means it
 		 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
 		 * initialize using the parent socket destination address.
 		 */
-		if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) {
-			bacpy(&conn->hcon->dst, &iso_pi(parent)->dst);
-			conn->hcon->dst_type = iso_pi(parent)->dst_type;
-			conn->hcon->sync_handle = iso_pi(parent)->sync_handle;
+		if (!bacmp(&hcon->dst, BDADDR_ANY)) {
+			bacpy(&hcon->dst, &iso_pi(parent)->dst);
+			hcon->dst_type = iso_pi(parent)->dst_type;
+			hcon->sync_handle = iso_pi(parent)->sync_handle;
 		}
 
-		bacpy(&iso_pi(sk)->dst, &conn->hcon->dst);
-		iso_pi(sk)->dst_type = conn->hcon->dst_type;
+		bacpy(&iso_pi(sk)->dst, &hcon->dst);
+		iso_pi(sk)->dst_type = hcon->dst_type;
 
-		hci_conn_hold(conn->hcon);
-		__iso_chan_add(conn, sk, parent);
+		hci_conn_hold(hcon);
+		iso_chan_add(conn, sk, parent);
 
 		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
 			sk->sk_state = BT_CONNECT2;
@@ -1500,8 +1490,6 @@ static void iso_conn_ready(struct iso_conn *conn)
 		parent->sk_data_ready(parent);
 
 		release_sock(parent);
-
-		iso_conn_unlock(conn);
 	}
 }
 
@@ -27,7 +27,7 @@ struct mgmt_mesh_tx {
 	struct sock *sk;
 	u8 handle;
 	u8 instance;
-	u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
+	u8 param[sizeof(struct mgmt_cp_mesh_send) + 31];
 };
 
 struct mgmt_pending_cmd {
@@ -391,6 +391,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
 	    addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	sock_hold(sk);
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
@@ -410,14 +411,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
 	d->sec_level = rfcomm_pi(sk)->sec_level;
 	d->role_switch = rfcomm_pi(sk)->role_switch;
 
+	/* Drop sock lock to avoid potential deadlock with the RFCOMM lock */
+	release_sock(sk);
 	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
 			      sa->rc_channel);
-	if (!err)
+	lock_sock(sk);
+	if (!err && !sock_flag(sk, SOCK_ZAPPED))
 		err = bt_sock_wait_state(sk, BT_CONNECTED,
 				sock_sndtimeo(sk, flags & O_NONBLOCK));
 
 done:
 	release_sock(sk);
+	sock_put(sk);
 	return err;
 }
 
@@ -122,10 +122,13 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
 {
 	const struct rss_reply_data *data = RSS_REPDATA(reply_base);
 
-	if (nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc) ||
-	    nla_put(skb, ETHTOOL_A_RSS_INDIR,
-		    sizeof(u32) * data->indir_size, data->indir_table) ||
-	    nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))
+	if ((data->hfunc &&
+	     nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
+	    (data->indir_size &&
+	     nla_put(skb, ETHTOOL_A_RSS_INDIR,
+		     sizeof(u32) * data->indir_size, data->indir_table)) ||
+	    (data->hkey_size &&
+	     nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))
 		return -EMSGSIZE;
 
 	return 0;
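The rewritten condition reports each RSS attribute only when the driver actually returned data for it, so an absent value becomes an absent attribute rather than a zero-length one. A generic sketch of the guard (attribute type and names here are illustrative, not the ethtool ones):

	/* Sketch only: emit an optional netlink attribute. Short-circuit
	 * evaluation means nla_put() runs only when there is a payload.
	 */
	static int put_optional_u32_table(struct sk_buff *skb, int attrtype,
					  const u32 *tbl, unsigned int n)
	{
		if (n && nla_put(skb, attrtype, n * sizeof(u32), tbl))
			return -EMSGSIZE;

		return 0; /* n == 0: attribute omitted entirely */
	}
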
@@ -650,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	spin_lock(lock);
 	if (osk) {
 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-		ret = sk_nulls_del_node_init_rcu(osk);
-	} else if (found_dup_sk) {
+		ret = sk_hashed(osk);
+		if (ret) {
+			/* Before deleting the node, we insert a new one to make
+			 * sure that the look-up-sk process would not miss either
+			 * of them and that at least one node would exist in ehash
+			 * table all the time. Otherwise there's a tiny chance
+			 * that lookup process could find nothing in ehash table.
+			 */
+			__sk_nulls_add_node_tail_rcu(sk, list);
+			sk_nulls_del_node_init_rcu(osk);
+		}
+		goto unlock;
+	}
+	if (found_dup_sk) {
 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
 		if (*found_dup_sk)
 			ret = false;
@@ -660,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	if (ret)
 		__sk_nulls_add_node_rcu(sk, list);
 
+unlock:
 	spin_unlock(lock);
 
 	return ret;
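The block comment in the hunk carries the whole idea: the replacement is make-before-break, so a concurrent lockless reader walking the ehash chain can never observe a window with neither socket present. Reduced to its core (illustrative helper, not in the tree):

	/* Sketch only: swap old for new on an RCU nulls list without a
	 * window in which readers find neither node.
	 */
	static void nulls_replace_sketch(struct sock *new, struct sock *old,
					 struct hlist_nulls_head *list)
	{
		__sk_nulls_add_node_tail_rcu(new, list); /* publish new first */
		sk_nulls_del_node_init_rcu(old);         /* then retire old */
	}
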
@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-				   struct hlist_nulls_head *list)
+static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
+					struct hlist_nulls_head *list)
 {
-	hlist_nulls_add_head_rcu(&tw->tw_node, list);
+	hlist_nulls_add_tail_rcu(&tw->tw_node, list);
 }
 
 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	spin_lock(lock);
 
-	inet_twsk_add_node_rcu(tw, &ehead->chain);
+	inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
 	if (__sk_nulls_del_node_init_rcu(sk))
@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
 
 	/* There's a bubble in the pipe until at least the first ACK. */
 	tp->app_limited = ~0U;
+	tp->rate_app_limited = 1;
 
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
@@ -3178,6 +3179,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->plb_rehash = 0;
 	/* There's a bubble in the pipe until at least the first ACK. */
 	tp->app_limited = ~0U;
+	tp->rate_app_limited = 1;
 	tp->rack.mstamp = 0;
 	tp->rack.advanced = 0;
 	tp->rack.reo_wnd_steps = 1;
@@ -139,7 +139,7 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
 	if (sk->sk_socket)
 		clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
 
-	err = -EINVAL;
+	err = -ENOTCONN;
 	if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
 		goto out_err;
 
@@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq;
 /* per-net private data for this module */
 static unsigned int l2tp_net_id;
 struct l2tp_net {
-	struct list_head l2tp_tunnel_list;
-	/* Lock for write access to l2tp_tunnel_list */
-	spinlock_t l2tp_tunnel_list_lock;
+	/* Lock for write access to l2tp_tunnel_idr */
+	spinlock_t l2tp_tunnel_idr_lock;
+	struct idr l2tp_tunnel_idr;
 	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
 	/* Lock for write access to l2tp_session_hlist */
 	spinlock_t l2tp_session_hlist_lock;
@@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 	struct l2tp_tunnel *tunnel;
 
 	rcu_read_lock_bh();
-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (tunnel->tunnel_id == tunnel_id &&
-		    refcount_inc_not_zero(&tunnel->ref_count)) {
-			rcu_read_unlock_bh();
-
-			return tunnel;
-		}
+	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
+	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
+		rcu_read_unlock_bh();
+		return tunnel;
 	}
 	rcu_read_unlock_bh();
 
@@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
 
 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 {
-	const struct l2tp_net *pn = l2tp_pernet(net);
+	struct l2tp_net *pn = l2tp_pernet(net);
+	unsigned long tunnel_id, tmp;
 	struct l2tp_tunnel *tunnel;
 	int count = 0;
 
 	rcu_read_lock_bh();
-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (++count > nth &&
+	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+		if (tunnel && ++count > nth &&
 		    refcount_inc_not_zero(&tunnel->ref_count)) {
 			rcu_read_unlock_bh();
 			return tunnel;
@@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
 	nf_reset_ct(skb);
 
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
 	if (sock_owned_by_user(sk)) {
 		kfree_skb(skb);
 		ret = NET_XMIT_DROP;
@@ -1227,6 +1225,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
 	l2tp_tunnel_delete(tunnel);
 }
 
+static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
+{
+	struct l2tp_net *pn = l2tp_pernet(net);
+
+	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
+	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+}
+
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
@@ -1234,7 +1241,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 						  del_work);
 	struct sock *sk = tunnel->sock;
 	struct socket *sock = sk->sk_socket;
-	struct l2tp_net *pn;
 
 	l2tp_tunnel_closeall(tunnel);
 
@@ -1248,12 +1254,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
 		}
 	}
 
-	/* Remove the tunnel struct from the tunnel list */
-	pn = l2tp_pernet(tunnel->l2tp_net);
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_del_rcu(&tunnel->list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
+	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
 	/* drop initial ref */
 	l2tp_tunnel_dec_refcount(tunnel);
 
@@ -1384,8 +1385,6 @@ out:
 	return err;
 }
 
-static struct lock_class_key l2tp_socket_class;
-
 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
 {
@@ -1455,12 +1454,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 			 struct l2tp_tunnel_cfg *cfg)
 {
-	struct l2tp_tunnel *tunnel_walk;
-	struct l2tp_net *pn;
+	struct l2tp_net *pn = l2tp_pernet(net);
+	u32 tunnel_id = tunnel->tunnel_id;
 	struct socket *sock;
 	struct sock *sk;
 	int ret;
 
+	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
+			    GFP_ATOMIC);
+	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+	if (ret)
+		return ret == -ENOSPC ? -EEXIST : ret;
+
 	if (tunnel->fd < 0) {
 		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
 					      tunnel->peer_tunnel_id, cfg,
@@ -1474,6 +1480,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 	}
 
 	sk = sock->sk;
+	lock_sock(sk);
 	write_lock_bh(&sk->sk_callback_lock);
 	ret = l2tp_validate_socket(sk, net, tunnel->encap);
 	if (ret < 0)
@@ -1481,24 +1488,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 	rcu_assign_sk_user_data(sk, tunnel);
 	write_unlock_bh(&sk->sk_callback_lock);
 
-	tunnel->l2tp_net = net;
-	pn = l2tp_pernet(net);
-
-	sock_hold(sk);
-	tunnel->sock = sk;
-
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
-		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
-			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-			sock_put(sk);
-			ret = -EEXIST;
-			goto err_sock;
-		}
-	}
-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
 		struct udp_tunnel_sock_cfg udp_cfg = {
 			.sk_user_data = tunnel,
@@ -1512,9 +1501,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 
 	tunnel->old_sk_destruct = sk->sk_destruct;
 	sk->sk_destruct = &l2tp_tunnel_destruct;
-	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
-				   "l2tp_sock");
 	sk->sk_allocation = GFP_ATOMIC;
+	release_sock(sk);
+
+	sock_hold(sk);
+	tunnel->sock = sk;
+	tunnel->l2tp_net = net;
+
+	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
+	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
 
 	trace_register_tunnel(tunnel);
 
@@ -1523,17 +1519,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 
 	return 0;
 
 err_sock:
 	write_lock_bh(&sk->sk_callback_lock);
 	rcu_assign_sk_user_data(sk, NULL);
+err_inval_sock:
 	write_unlock_bh(&sk->sk_callback_lock);
+	release_sock(sk);
 
 	if (tunnel->fd < 0)
 		sock_release(sock);
 	else
 		sockfd_put(sock);
 err:
+	l2tp_tunnel_remove(net, tunnel);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
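Registration above is a two-phase publish: idr_alloc_u32() pins the tunnel ID early with a NULL pointer, so a duplicate ID fails fast with -EEXIST while idr_find() still sees "no tunnel", and idr_replace() only swaps in the object once it is fully set up. A hedged sketch of the idiom (error unwinding trimmed, names illustrative):

	/* Sketch only: reserve an ID first, publish the object last. */
	static int reserve_then_publish(struct idr *idr, spinlock_t *lock,
					void *obj, u32 id)
	{
		int ret;

		spin_lock_bh(lock);
		ret = idr_alloc_u32(idr, NULL, &id, id, GFP_ATOMIC);
		spin_unlock_bh(lock);
		if (ret)
			return ret == -ENOSPC ? -EEXIST : ret;

		/* ...long, possibly sleeping setup happens here... */

		spin_lock_bh(lock);
		idr_replace(idr, obj, id); /* now visible to idr_find() */
		spin_unlock_bh(lock);
		return 0;
	}
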
@@ -1647,8 +1642,8 @@ static __net_init int l2tp_init_net(struct net *net)
 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
 	int hash;
 
-	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
-	spin_lock_init(&pn->l2tp_tunnel_list_lock);
+	idr_init(&pn->l2tp_tunnel_idr);
+	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
 
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
 		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
@@ -1662,11 +1657,13 @@ static __net_exit void l2tp_exit_net(struct net *net)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	struct l2tp_tunnel *tunnel = NULL;
+	unsigned long tunnel_id, tmp;
 	int hash;
 
 	rcu_read_lock_bh();
-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		l2tp_tunnel_delete(tunnel);
+	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+		if (tunnel)
+			l2tp_tunnel_delete(tunnel);
 	}
 	rcu_read_unlock_bh();
 
@@ -1676,6 +1673,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
 
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
 		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
+	idr_destroy(&pn->l2tp_tunnel_idr);
 }
 
 static struct pernet_operations l2tp_net_ops = {
@@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
 	struct tid_ampdu_tx *tid_tx;
 	struct ieee80211_local *local = sta->local;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_ampdu_params params = {
 		.sta = &sta->sta,
 		.action = IEEE80211_AMPDU_TX_START,
@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 	 */
 	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
-	ieee80211_agg_stop_txq(sta, tid);
-
 	/*
 	 * Make sure no packets are being processed. This ensures that
 	 * we have a valid starting sequence number and that in-flight
@@ -521,6 +519,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 	 */
 	synchronize_net();
 
+	sdata = sta->sdata;
 	params.ssn = sta->tid_seq[tid] >> 4;
 	ret = drv_ampdu_action(local, sdata, &params);
 	tid_tx->ssn = params.ssn;
@@ -534,6 +533,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 		 */
 		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
 	} else if (ret) {
+		if (!sdata)
+			return;
+
 		ht_dbg(sdata,
 		       "BA request denied - HW unavailable for %pM tid %d\n",
 		       sta->sta.addr, tid);
@@ -147,6 +147,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
 		link_conf->bssid_index = 0;
 	link_conf->nontransmitted = false;
 	link_conf->ema_ap = false;
+	link_conf->bssid_indicator = 0;
 
 	if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
 		return -EINVAL;
@@ -1511,6 +1512,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
 	kfree(link_conf->ftmr_params);
 	link_conf->ftmr_params = NULL;
 
+	sdata->vif.mbssid_tx_vif = NULL;
+	link_conf->bssid_index = 0;
+	link_conf->nontransmitted = false;
+	link_conf->ema_ap = false;
+	link_conf->bssid_indicator = 0;
+
 	__sta_info_flush(sdata, true);
 	ieee80211_free_keys(sdata, true);
 
@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
 			continue;
 		txqi = to_txq_info(sta->sta.txq[i]);
 		p += scnprintf(p, bufsz + buf - p,
-			       "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
+			       "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
 			       txqi->txq.tid,
 			       txqi->txq.ac,
 			       txqi->tin.backlog_bytes,
@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
 			       txqi->flags,
 			       test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
 			       test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
-			       test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
+			       test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
+			       test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
 	}
 
 	rcu_read_unlock();
@@ -392,6 +392,9 @@ int drv_ampdu_action(struct ieee80211_local *local,
 
 	might_sleep();
 
+	if (!sdata)
+		return -EIO;
+
 	sdata = get_bss_sdata(sdata);
 	if (!check_sdata_in_driver(sdata))
 		return -EIO;
@@ -1199,7 +1199,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 
 	/* In reconfig don't transmit now, but mark for waking later */
 	if (local->in_reconfig) {
-		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
+		set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
 		return;
 	}
 
@@ -391,6 +391,37 @@ void ieee80211_ba_session_work(struct work_struct *work)
 
 		tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
 		if (!blocked && tid_tx) {
+			struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
+			struct ieee80211_sub_if_data *sdata =
+				vif_to_sdata(txqi->txq.vif);
+			struct fq *fq = &sdata->local->fq;
+
+			spin_lock_bh(&fq->lock);
+
+			/* Allow only frags to be dequeued */
+			set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+
+			if (!skb_queue_empty(&txqi->frags)) {
+				/* Fragmented Tx is ongoing, wait for it to
+				 * finish. Reschedule worker to retry later.
+				 */
+
+				spin_unlock_bh(&fq->lock);
+				spin_unlock_bh(&sta->lock);
+
+				/* Give the task working on the txq a chance
+				 * to send out the queued frags
+				 */
+				synchronize_net();
+
+				mutex_unlock(&sta->ampdu_mlme.mtx);
+
+				ieee80211_queue_work(&sdata->local->hw, work);
+				return;
+			}
+
+			spin_unlock_bh(&fq->lock);
+
 			/*
 			 * Assign it over to the normal tid_tx array
 			 * where it "goes live".
@@ -838,7 +838,7 @@ enum txq_info_flags {
 	IEEE80211_TXQ_STOP,
 	IEEE80211_TXQ_AMPDU,
 	IEEE80211_TXQ_NO_AMSDU,
-	IEEE80211_TXQ_STOP_NETIF_TX,
+	IEEE80211_TXQ_DIRTY,
 };
 
 /**
@@ -364,7 +364,9 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 
 		/* No support for VLAN with MLO yet */
 		if (iftype == NL80211_IFTYPE_AP_VLAN &&
-		    nsdata->wdev.use_4addr)
+		    sdata->wdev.use_4addr &&
+		    nsdata->vif.type == NL80211_IFTYPE_AP &&
+		    nsdata->vif.valid_links)
 			return -EOPNOTSUPP;
 
 		/*
@@ -2195,7 +2197,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
 	ret = cfg80211_register_netdevice(ndev);
 	if (ret) {
-		ieee80211_if_free(ndev);
 		free_netdev(ndev);
 		return ret;
 	}
@@ -4049,6 +4049,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 #undef CALL_RXH
 }
 
+static bool
+ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+{
+	if (!sta->mlo)
+		return false;
+
+	return !!(sta->valid_links & BIT(link_id));
+}
+
+static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
+				       u8 link_id)
+{
+	rx->link_id = link_id;
+	rx->link = rcu_dereference(rx->sdata->link[link_id]);
+
+	if (!rx->sta)
+		return rx->link;
+
+	if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
+		return false;
+
+	rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
+
+	return rx->link && rx->link_sta;
+}
+
+static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+				      struct ieee80211_sta *pubsta,
+				      int link_id)
+{
+	struct sta_info *sta;
+
+	sta = container_of(pubsta, struct sta_info, sta);
+
+	rx->link_id = link_id;
+	rx->sta = sta;
+
+	if (sta) {
+		rx->local = sta->sdata->local;
+		if (!rx->sdata)
+			rx->sdata = sta->sdata;
+		rx->link_sta = &sta->deflink;
+	}
+
+	if (link_id < 0)
+		rx->link = &rx->sdata->deflink;
+	else if (!ieee80211_rx_data_set_link(rx, link_id))
+		return false;
+
+	return true;
+}
+
 /*
  * This function makes calls into the RX path, therefore
  * it has to be invoked under RCU read lock.
@@ -4057,16 +4109,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
 	struct sk_buff_head frames;
 	struct ieee80211_rx_data rx = {
-		.sta = sta,
-		.sdata = sta->sdata,
-		.local = sta->local,
 		/* This is OK -- must be QoS data frame */
 		.security_idx = tid,
 		.seqno_idx = tid,
-		.link_id = -1,
 	};
 	struct tid_ampdu_rx *tid_agg_rx;
-	u8 link_id;
+	int link_id = -1;
+
+	/* FIXME: statistics won't be right with this */
+	if (sta->sta.valid_links)
+		link_id = ffs(sta->sta.valid_links) - 1;
+
+	if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
+		return;
 
 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
 	if (!tid_agg_rx)
@@ -4086,10 +4141,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		};
 		drv_event_callback(rx.local, rx.sdata, &event);
 	}
-	/* FIXME: statistics won't be right with this */
-	link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
-	rx.link = rcu_dereference(sta->sdata->link[link_id]);
-	rx.link_sta = rcu_dereference(sta->link[link_id]);
 
 	ieee80211_rx_handlers(&rx, &frames);
 }
@@ -4105,7 +4156,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
 		/* This is OK -- must be QoS data frame */
 		.security_idx = tid,
 		.seqno_idx = tid,
-		.link_id = -1,
 	};
 	int i, diff;
 
@@ -4116,10 +4166,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
 
 	sta = container_of(pubsta, struct sta_info, sta);
 
-	rx.sta = sta;
-	rx.sdata = sta->sdata;
-	rx.link = &rx.sdata->deflink;
-	rx.local = sta->local;
+	if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
+		return;
 
 	rcu_read_lock();
 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -4506,15 +4554,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&local->sta_mtx);
 }
 
-static bool
-ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
-{
-	if (!sta->mlo)
-		return false;
-
-	return !!(sta->valid_links & BIT(link_id));
-}
-
 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
 			      struct ieee80211_fast_rx *fast_rx,
 			      int orig_len)
@@ -4625,7 +4664,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-	struct sta_info *sta = rx->sta;
 	int orig_len = skb->len;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	int snap_offs = hdrlen;
@@ -4637,7 +4675,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
 		u8 da[ETH_ALEN];
 		u8 sa[ETH_ALEN];
 	} addrs __aligned(2);
-	struct link_sta_info *link_sta;
 	struct ieee80211_sta_rx_stats *stats;
 
 	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
@@ -4740,18 +4777,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
  drop:
 	dev_kfree_skb(skb);
 
-	if (rx->link_id >= 0) {
-		link_sta = rcu_dereference(sta->link[rx->link_id]);
-		if (!link_sta)
-			return true;
-	} else {
-		link_sta = &sta->deflink;
-	}
-
 	if (fast_rx->uses_rss)
-		stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
+		stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
 	else
-		stats = &link_sta->rx_stats;
+		stats = &rx->link_sta->rx_stats;
 
 	stats->dropped++;
 	return true;
@@ -4769,8 +4798,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
-	struct link_sta_info *link_sta = NULL;
-	struct ieee80211_link_data *link;
+	struct link_sta_info *link_sta = rx->link_sta;
+	struct ieee80211_link_data *link = rx->link;
 
 	rx->skb = skb;
 
@@ -4792,35 +4821,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 	if (!ieee80211_accept_frame(rx))
 		return false;
 
-	if (rx->link_id >= 0) {
-		link = rcu_dereference(rx->sdata->link[rx->link_id]);
-
-		/* we might race link removal */
-		if (!link)
-			return true;
-		rx->link = link;
-
-		if (rx->sta) {
-			rx->link_sta =
-				rcu_dereference(rx->sta->link[rx->link_id]);
-			if (!rx->link_sta)
-				return true;
-		}
-	} else {
-		if (rx->sta)
-			rx->link_sta = &rx->sta->deflink;
-
-		rx->link = &sdata->deflink;
-	}
-
-	if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
-		     rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
-		link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
-
-		if (WARN_ON_ONCE(!link_sta))
-			return true;
-	}
-
 	if (!consume) {
 		struct skb_shared_hwtstamps *shwt;
 
@@ -4838,9 +4838,12 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 		 */
 		shwt = skb_hwtstamps(rx->skb);
 		shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+
+		/* Update the hdr pointer to the new skb for translation below */
+		hdr = (struct ieee80211_hdr *)rx->skb->data;
 	}
 
-	if (unlikely(link_sta)) {
+	if (unlikely(rx->sta && rx->sta->sta.mlo)) {
 		/* translate to MLD addresses */
 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
@@ -4870,6 +4873,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_fast_rx *fast_rx;
 	struct ieee80211_rx_data rx;
+	int link_id = -1;
 
 	memset(&rx, 0, sizeof(rx));
 	rx.skb = skb;
@@ -4886,12 +4890,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
 	if (!pubsta)
 		goto drop;
 
-	rx.sta = container_of(pubsta, struct sta_info, sta);
-	rx.sdata = rx.sta->sdata;
-
-	if (status->link_valid &&
-	    !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
-		goto drop;
+	if (status->link_valid)
+		link_id = status->link_id;
 
 	/*
 	 * TODO: Should the frame be dropped if the right link_id is not
@@ -4900,19 +4900,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
 	 * link_id is used only for stats purpose and updating the stats on
 	 * the deflink is fine?
 	 */
-	if (status->link_valid)
-		rx.link_id = status->link_id;
-
-	if (rx.link_id >= 0) {
-		struct ieee80211_link_data *link;
-
-		link = rcu_dereference(rx.sdata->link[rx.link_id]);
-		if (!link)
-			goto drop;
-		rx.link = link;
-	} else {
-		rx.link = &rx.sdata->deflink;
-	}
+	if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+		goto drop;
 
 	fast_rx = rcu_dereference(rx.sta->fast_rx);
 	if (!fast_rx)
@@ -4930,6 +4919,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
 {
 	struct link_sta_info *link_sta;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct sta_info *sta;
+	int link_id = -1;
 
 	/*
 	 * Look up link station first, in case there's a
@@ -4939,24 +4930,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
 	 */
 	link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
 	if (link_sta) {
-		rx->sta = link_sta->sta;
-		rx->link_id = link_sta->link_id;
+		sta = link_sta->sta;
+		link_id = link_sta->link_id;
 	} else {
 		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-		rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
-		if (rx->sta) {
-			if (status->link_valid &&
-			    !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
-							       status->link_id))
-				return false;
-
-			rx->link_id = status->link_valid ? status->link_id : -1;
-		} else {
-			rx->link_id = -1;
-		}
+		sta = sta_info_get_bss(rx->sdata, hdr->addr2);
+		if (status->link_valid)
+			link_id = status->link_id;
 	}
 
+	if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
+		return false;
+
 	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
 }
 
@@ -5015,19 +5001,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 
 	if (ieee80211_is_data(fc)) {
 		struct sta_info *sta, *prev_sta;
-		u8 link_id = status->link_id;
+		int link_id = -1;
+
+		if (status->link_valid)
+			link_id = status->link_id;
 
 		if (pubsta) {
-			rx.sta = container_of(pubsta, struct sta_info, sta);
-			rx.sdata = rx.sta->sdata;
-
-			if (status->link_valid &&
-			    !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
+			if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
 				goto out;
 
-			if (status->link_valid)
-				rx.link_id = status->link_id;
-
 			/*
 			 * In MLO connection, fetch the link_id using addr2
 			 * when the driver does not pass link_id in status.
@@ -5045,7 +5027,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 			if (!link_sta)
 				goto out;
 
-			rx.link_id = link_sta->link_id;
+			ieee80211_rx_data_set_link(&rx, link_sta->link_id);
 		}
 
 		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
@@ -5061,30 +5043,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 				continue;
 			}
 
-			if ((status->link_valid &&
-			     !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-								link_id)) ||
-			    (!status->link_valid && prev_sta->sta.mlo))
+			rx.sdata = prev_sta->sdata;
+			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+						       link_id))
+				goto out;
+
+			if (!status->link_valid && prev_sta->sta.mlo)
 				continue;
 
-			rx.link_id = status->link_valid ? link_id : -1;
-			rx.sta = prev_sta;
-			rx.sdata = prev_sta->sdata;
 			ieee80211_prepare_and_rx_handle(&rx, skb, false);
 
 			prev_sta = sta;
 		}
 
 		if (prev_sta) {
-			if ((status->link_valid &&
-			     !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-								link_id)) ||
-			    (!status->link_valid && prev_sta->sta.mlo))
+			rx.sdata = prev_sta->sdata;
+			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+						       link_id))
+				goto out;
+
+			if (!status->link_valid && prev_sta->sta.mlo)
 				goto out;
 
-			rx.link_id = status->link_valid ? link_id : -1;
-			rx.sta = prev_sta;
-			rx.sdata = prev_sta->sdata;
-
 			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
 				return;
 
@@ -1129,7 +1129,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
 	struct sk_buff *purge_skb = NULL;
 
 	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
-		info->flags |= IEEE80211_TX_CTL_AMPDU;
 		reset_agg_timer = true;
 	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
 		/*
@@ -1161,7 +1160,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
 		if (!tid_tx) {
 			/* do nothing, let packet pass through */
 		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
 			reset_agg_timer = true;
 		} else {
 			queued = true;
@@ -3677,8 +3675,7 @@ static void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	info->band = fast_tx->band;
 	info->control.vif = &sdata->vif;
 	info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
-		      IEEE80211_TX_CTL_DONTFRAG |
-		      (ampdu ? IEEE80211_TX_CTL_AMPDU : 0);
+		      IEEE80211_TX_CTL_DONTFRAG;
 	info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT |
 			      u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
 					      IEEE80211_TX_CTRL_MLO_LINK);
@@ -3783,6 +3780,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result r;
 	struct ieee80211_vif *vif = txq->vif;
+	int q = vif->hw_queue[txq->ac];
+	bool q_stopped;
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
@@ -3790,17 +3789,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 		return NULL;
 
 begin:
-	spin_lock_bh(&fq->lock);
+	spin_lock(&local->queue_stop_reason_lock);
+	q_stopped = local->queue_stop_reasons[q];
+	spin_unlock(&local->queue_stop_reason_lock);
 
-	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
-	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
-		goto out;
-
-	if (vif->txqs_stopped[txq->ac]) {
-		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
-		goto out;
+	if (unlikely(q_stopped)) {
+		/* mark for waking later */
+		set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
+		return NULL;
 	}
 
+	spin_lock_bh(&fq->lock);
+
 	/* Make sure fragments stay together. */
 	skb = __skb_dequeue(&txqi->frags);
 	if (unlikely(skb)) {
@@ -3810,6 +3810,9 @@ begin:
 		IEEE80211_SKB_CB(skb)->control.flags &=
 			~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
 	} else {
+		if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
+			goto out;
+
 		skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
 	}
 
@@ -3860,9 +3863,8 @@ begin:
 	}
 
 	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
-		info->flags |= IEEE80211_TX_CTL_AMPDU;
-	else
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+		info->flags |= (IEEE80211_TX_CTL_AMPDU |
+				IEEE80211_TX_CTL_DONTFRAG);
 
 	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
 		if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
@@ -4596,8 +4598,6 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
 
 	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
-	if (tid_tx)
-		info->flags |= IEEE80211_TX_CTL_AMPDU;
 
 	info->hw_queue = sdata->vif.hw_queue[queue];
 
@@ -292,22 +292,12 @@ static void wake_tx_push_queue(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata,
 			       struct ieee80211_txq *queue)
 {
-	int q = sdata->vif.hw_queue[queue->ac];
 	struct ieee80211_tx_control control = {
 		.sta = queue->sta,
 	};
 	struct sk_buff *skb;
-	unsigned long flags;
-	bool q_stopped;
 
 	while (1) {
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		q_stopped = local->queue_stop_reasons[q];
-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
-		if (q_stopped)
-			break;
-
 		skb = ieee80211_tx_dequeue(&local->hw, queue);
 		if (!skb)
 			break;
@@ -347,8 +337,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
 	local_bh_disable();
 	spin_lock(&fq->lock);
 
-	sdata->vif.txqs_stopped[ac] = false;
-
 	if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
 		goto out;
 
@@ -370,7 +358,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
 		if (ac != txq->ac)
 			continue;
 
-		if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
+		if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
 					&txqi->flags))
 			continue;
 
@@ -385,7 +373,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
 
 	txqi = to_txq_info(vif->txq);
 
-	if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
+	if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
 	    (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
 		goto out;
 
@@ -517,8 +505,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 				   bool refcounted)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata;
-	int n_acs = IEEE80211_NUM_ACS;
 
 	trace_stop_queue(local, queue, reason);
 
@@ -530,29 +516,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 	else
 		local->q_stop_reasons[queue][reason]++;
 
-	if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
-		return;
-
-	if (local->hw.queues < IEEE80211_NUM_ACS)
-		n_acs = 1;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		int ac;
-
-		if (!sdata->dev)
-			continue;
-
-		for (ac = 0; ac < n_acs; ac++) {
-			if (sdata->vif.hw_queue[ac] == queue ||
-			    sdata->vif.cab_queue == queue) {
-				spin_lock(&local->fq.lock);
-				sdata->vif.txqs_stopped[ac] = true;
-				spin_unlock(&local->fq.lock);
-			}
-		}
-	}
-	rcu_read_unlock();
+	set_bit(reason, &local->queue_stop_reasons[queue]);
 }
 
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -420,6 +420,31 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
 	}
 }
 
+/* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses,
+ * otherwise allow any matching local/remote pair
+ */
+bool mptcp_pm_addr_families_match(const struct sock *sk,
+				  const struct mptcp_addr_info *loc,
+				  const struct mptcp_addr_info *rem)
+{
+	bool mptcp_is_v4 = sk->sk_family == AF_INET;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
+	bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);
+
+	if (mptcp_is_v4)
+		return loc_is_v4 && rem_is_v4;
+
+	if (ipv6_only_sock(sk))
+		return !loc_is_v4 && !rem_is_v4;
+
+	return loc_is_v4 == rem_is_v4;
+#else
+	return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
+#endif
+}
+
 void mptcp_pm_data_reset(struct mptcp_sock *msk)
 {
 	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
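The helper's rules, spelled out: an AF_INET MPTCP socket accepts only v4/v4 pairs; an ipv6_only socket only v6/v6; any other v6 socket accepts a pair as long as both sides are the same family, with v4-mapped v6 addresses counting as v4. A small illustrative caller (not from the tree):

	/* Sketch only: gate subflow creation on family compatibility. */
	static int families_ok(const struct sock *sk,
			       const struct mptcp_addr_info *loc,
			       const struct mptcp_addr_info *rem)
	{
		if (!mptcp_pm_addr_families_match(sk, loc, rem))
			return -EINVAL; /* e.g. v4 local paired with v6 remote */

		return 0;
	}
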
@@ -294,6 +294,13 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	sk = (struct sock *)msk;
+
+	if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+		GENL_SET_ERR_MSG(info, "families mismatch");
+		err = -EINVAL;
+		goto create_err;
+	}
+
 	lock_sock(sk);
 
 	err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
@@ -98,7 +98,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
 	struct socket *ssock;
 	int err;
 
-	err = mptcp_subflow_create_socket(sk, &ssock);
+	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
 	if (err)
 		return err;
 
@@ -641,7 +641,8 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
 /* called with sk socket lock held */
 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 			    const struct mptcp_addr_info *remote);
-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
+int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+				struct socket **new_sock);
 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
 			 struct sockaddr_storage *addr,
 			 unsigned short family);
@@ -776,6 +777,9 @@ int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
 			 bool require_family,
 			 struct mptcp_pm_addr_entry *entry);
+bool mptcp_pm_addr_families_match(const struct sock *sk,
+				  const struct mptcp_addr_info *loc,
+				  const struct mptcp_addr_info *rem);
 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
@@ -1547,7 +1547,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	if (!mptcp_is_fully_established(sk))
 		goto err_out;
 
-	err = mptcp_subflow_create_socket(sk, &sf);
+	err = mptcp_subflow_create_socket(sk, loc->family, &sf);
 	if (err)
 		goto err_out;
 
@@ -1660,7 +1660,9 @@ static void mptcp_subflow_ops_undo_override(struct sock *ssk)
 #endif
 		ssk->sk_prot = &tcp_prot;
 }
-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
+
+int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+				struct socket **new_sock)
 {
 	struct mptcp_subflow_context *subflow;
 	struct net *net = sock_net(sk);
@@ -1673,8 +1675,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
 	if (unlikely(!sk->sk_socket))
 		return -EINVAL;
 
-	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
-			       &sf);
+	err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
 	if (err)
 		return err;
 
@@ -308,8 +308,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 			return -IPSET_ERR_BITMAP_RANGE;
 
 		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
-		hosts = 2 << (32 - netmask - 1);
-		elements = 2 << (netmask - mask_bits - 1);
+		hosts = 2U << (32 - netmask - 1);
+		elements = 2UL << (netmask - mask_bits - 1);
 	}
 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
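The suffixes matter in the extreme case netmask == 32, mask_bits == 0, where the set spans all 2^32 addresses. Worked out (illustrative, assuming a 64-bit unsigned long):

	/*
	 * elements = 2   << (32 - 0 - 1)  ->  2 << 31: a signed-int shift
	 *                                     into the sign bit (undefined)
	 * elements = 2UL << (32 - 0 - 1)  ->  2UL << 31 = 4294967296, the
	 *                                     correct element count
	 */
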
@@ -1068,6 +1068,13 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
 				ct->proto.tcp.last_flags |=
 					IP_CT_EXP_CHALLENGE_ACK;
 		}
+
+		/* possible challenge ack reply to syn */
+		if (old_state == TCP_CONNTRACK_SYN_SENT &&
+		    index == TCP_ACK_SET &&
+		    dir == IP_CT_DIR_REPLY)
+			ct->proto.tcp.last_ack = ntohl(th->ack_seq);
+
 		spin_unlock_bh(&ct->lock);
 		nf_ct_l4proto_log_invalid(skb, ct, state,
 					  "packet (index %d) in dir %d ignored, state %s",
@@ -1193,6 +1200,14 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
 			 * segments we ignored. */
 			goto in_window;
 		}
+
+		/* Reset in response to a challenge-ack we let through earlier */
+		if (old_state == TCP_CONNTRACK_SYN_SENT &&
+		    ct->proto.tcp.last_index == TCP_ACK_SET &&
+		    ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
+		    ntohl(th->seq) == ct->proto.tcp.last_ack)
+			goto in_window;
+
 		break;
 	default:
 		/* Keep compilers happy. */
@@ -63,7 +63,7 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
 			return false;
 
 		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
-			ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
+			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
 
 		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
 
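The flipped sign fixes the trim arithmetic when a read crosses from the rebuilt VLAN header into the payload: the header supplies VLAN_ETH_HLEN + vlan_hlen bytes, so only the overrun past that boundary may be subtracted from ethlen. Worked out with illustrative values offset = 20, len = 4, vlan_hlen = 4 (VLAN_ETH_HLEN = 18):

	/*
	 * fixed:  ethlen -= 20 + 4 - 18 - 4  ->  ethlen = 4 - 2 = 2
	 * broken: ethlen -= 20 + 4 - 18 + 4  ->  ethlen = 4 - 10 (underflow)
	 */
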
@@ -157,6 +157,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
 	cancel_work_sync(&local->rx_work);
 	cancel_work_sync(&local->timeout_work);
 	kfree_skb(local->rx_pending);
+	local->rx_pending = NULL;
 	del_timer_sync(&local->sdreq_timer);
 	cancel_work_sync(&local->sdreq_timeout_work);
 	nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
@@ -294,7 +294,7 @@ static void rxrpc_put_call_slot(struct rxrpc_call *call)
 static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
 {
 	struct rxrpc_local *local = call->local;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 	/* Even if driver returns failure adjust the stats - in case offload
 	 * ended but driver still wants to adjust the values.
 	 */
+	sch_tree_lock(sch);
 	for (i = 0; i < MAX_DPs; i++) {
 		if (!table->tab[i])
 			continue;
@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
 	}
 	_bstats_update(&sch->bstats, bytes, packets);
+	sch_tree_unlock(sch);
 
 	kfree(hw_stats);
 	return ret;
@@ -1549,7 +1549,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 	struct tc_htb_qopt_offload offload_opt;
 	struct netdev_queue *dev_queue;
 	struct Qdisc *q = cl->leaf.q;
-	struct Qdisc *old = NULL;
+	struct Qdisc *old;
 	int err;
 
 	if (cl->level)
@@ -1557,14 +1557,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 
 	WARN_ON(!q);
 	dev_queue = htb_offload_get_queue(cl);
-	old = htb_graft_helper(dev_queue, NULL);
-	if (destroying)
-		/* Before HTB is destroyed, the kernel grafts noop_qdisc to
-		 * all queues.
-		 */
-		WARN_ON(!(old->flags & TCQ_F_BUILTIN));
-	else
+	/* When destroying, caller qdisc_graft grafts the new qdisc and invokes
+	 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
+	 * does not need to graft or qdisc_put the qdisc being destroyed.
+	 */
+	if (!destroying) {
+		old = htb_graft_helper(dev_queue, NULL);
+		/* Last qdisc grafted should be the same as cl->leaf.q when
+		 * calling htb_delete.
+		 */
 		WARN_ON(old != q);
+	}
 
 	if (cl->parent) {
 		_bstats_update(&cl->parent->bstats_bias,
@@ -1581,10 +1584,12 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 	};
 	err = htb_offload(qdisc_dev(sch), &offload_opt);
 
-	if (!err || destroying)
-		qdisc_put(old);
-	else
-		htb_graft_helper(dev_queue, old);
+	if (!destroying) {
+		if (!err)
+			qdisc_put(old);
+		else
+			htb_graft_helper(dev_queue, old);
+	}
 
 	if (last_child)
 		return err;
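The htb hunks encode an ownership rule: grafting returns the previous child, and whoever grafted it must either put it (on success) or graft it back (on failure); when a higher layer has already done the graft-and-put, the class-destroy path must not touch the child at all. A hypothetical miniature of that rule — not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct qdisc { int refs; };

/* Install 'new' and hand back the previous occupant; the caller now
 * owns 'old' and must put it or graft it back. */
static struct qdisc *graft(struct qdisc **slot, struct qdisc *new)
{
	struct qdisc *old = *slot;
	*slot = new;
	return old;
}

static void put(struct qdisc *q)
{
	if (q && --q->refs == 0)
		free(q);
}

int main(void)
{
	struct qdisc *slot = malloc(sizeof(*slot));
	struct qdisc *old;

	slot->refs = 1;
	old = graft(&slot, NULL); /* only done when NOT destroying */
	put(old);                 /* offload succeeded: drop the old child */
	printf("done\n");
	return 0;
}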
@@ -1700,6 +1700,8 @@ static void taprio_reset(struct Qdisc *sch)
 	int i;
 
 	hrtimer_cancel(&q->advance_timer);
+	qdisc_synchronize(sch);
+
 	if (q->qdiscs) {
 		for (i = 0; i < dev->num_tx_queues; i++)
 			if (q->qdiscs[i])
@@ -1720,6 +1722,7 @@ static void taprio_destroy(struct Qdisc *sch)
 	 * happens in qdisc_create(), after taprio_init() has been called.
 	 */
 	hrtimer_cancel(&q->advance_timer);
+	qdisc_synchronize(sch);
 
 	taprio_disable_offload(dev, q, NULL);
 
@@ -752,6 +752,52 @@ test_subflows()
 		"$server4_token" > /dev/null 2>&1
 }
 
+test_subflows_v4_v6_mix()
+{
+	# Attempt to add a listener at 10.0.2.1:<subflow-port>
+	ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+		$app6_port > /dev/null 2>&1 &
+	local listener_pid=$!
+
+	# ADD_ADDR4 from server to client machine reusing the subflow port on
+	# the established v6 connection
+	:>"$client_evts"
+	ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
+		$server_addr_id dev ns1eth2 > /dev/null 2>&1
+	stdbuf -o0 -e0 printf "ADD_ADDR4 id:%d 10.0.2.1 (ns1) => ns2, reuse port\t\t" $server_addr_id
+	sleep 0.5
+	verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
+			      "$server_addr_id" "$app6_port"
+
+	# CREATE_SUBFLOW from client to server machine
+	:>"$client_evts"
+	ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+		$app6_port token "$client6_token" > /dev/null 2>&1
+	sleep 0.5
+	verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\
+			      "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
+			      "$server_addr_id" "ns2" "ns1"
+
+	# Delete the listener from the server ns, if one was created
+	kill_wait $listener_pid
+
+	sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+
+	# DESTROY_SUBFLOW from client to server machine
+	:>"$client_evts"
+	ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+		$app6_port token "$client6_token" > /dev/null 2>&1
+	sleep 0.5
+	verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client6_token" \
+			      "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
+			      "$server_addr_id" "ns2" "ns1"
+
+	# RM_ADDR from server to client machine
+	ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+		"$server6_token" > /dev/null 2>&1
+	sleep 0.5
+}
+
 test_prio()
 {
 	local count
@@ -861,6 +907,7 @@ make_connection "v6"
 test_announce
 test_remove
 test_subflows
+test_subflows_v4_v6_mix
 test_prio
 test_listener
 
@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
 }
 
 /* A single TPACKET_V3 block can hold multiple frames */
-static void recv_block(struct ring_state *ring)
+static bool recv_block(struct ring_state *ring)
 {
 	struct tpacket_block_desc *block;
 	char *frame;
@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
 
 	block = (void *)(ring->mmap + ring->idx * ring_block_sz);
 	if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
-		return;
+		return false;
 
 	frame = (char *)block;
 	frame += block->hdr.bh1.offset_to_first_pkt;
@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
 
 	block->hdr.bh1.block_status = TP_STATUS_KERNEL;
 	ring->idx = (ring->idx + 1) % ring_block_nr;
+
+	return true;
 }
 
 /* simple test: sleep once unconditionally and then process all rings */
@@ -245,7 +247,7 @@ static void process_rings(void)
 	usleep(1000 * cfg_timeout_msec);
 
 	for (i = 0; i < num_cpus; i++)
-		recv_block(&rings[i]);
+		do {} while (recv_block(&rings[i]));
 
 	fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
 		frames_received - frames_nohash - frames_error,
@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
 	struct tpacket_req3 req3 = {0};
 	void *ring;
 
-	req3.tp_retire_blk_tov = cfg_timeout_msec;
+	req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
 	req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
 
 	req3.tp_frame_size = 2048;
 	req3.tp_frame_nr = 1 << 10;
-	req3.tp_block_nr = 2;
+	req3.tp_block_nr = 16;
 
 	req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
 	req3.tp_block_size /= req3.tp_block_nr;
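The toeplitz changes work together: recv_block now reports whether it consumed a block, process_rings drains each ring until a block is still kernel-owned instead of reading exactly one per wakeup, and the ring gets more, faster-retiring blocks so everything is user-visible before the test's single sleep elapses. A stub sketch of the drain pattern (hypothetical stand-in, not the selftest code):

#include <stdbool.h>
#include <stdio.h>

/* Pretend three blocks were retired while the test slept. */
static int ready = 3;

static bool recv_block_stub(void)
{
	if (!ready)
		return false; /* block still TP_STATUS_KERNEL: stop */
	ready--;
	return true;          /* consumed one block, try the next */
}

int main(void)
{
	int drained = 0;

	/* drain until the ring has nothing user-owned left */
	do {} while (recv_block_stub() && ++drained);
	printf("drained %d blocks\n", drained);
	return 0;
}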
@@ -10,12 +10,20 @@
 ksft_skip=4
 
 testns=testns-$(mktemp -u "XXXXXXXX")
+tmp=""
 
 tables="foo bar baz quux"
 global_ret=0
 eret=0
 lret=0
 
+cleanup() {
+	ip netns pids "$testns" | xargs kill 2>/dev/null
+	ip netns del "$testns"
+
+	rm -f "$tmp"
+}
+
 check_result()
 {
 	local r=$1
@@ -43,6 +51,7 @@ if [ $? -ne 0 ];then
 	exit $ksft_skip
 fi
 
+trap cleanup EXIT
 tmp=$(mktemp)
 
 for table in $tables; do
@@ -139,11 +148,4 @@ done
 
 check_result $lret "add/delete with nftrace enabled"
 
-pkill -9 ping
-
-wait
-
-rm -f "$tmp"
-ip netns del "$testns"
-
 exit $global_ret
tools/testing/selftests/netfilter/settings (new file, 1 line)
@@ -0,0 +1 @@
+timeout=120