Merge tag 'net-6.6-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from WiFi and netfilter.

  Most regressions addressed here come from quite old versions, with the
  exceptions of the iavf one and the WiFi fixes. No known outstanding
  reports or investigation.

  Fixes to fixes:

   - eth: iavf: in iavf_down, disable queues when removing the driver

  Previous releases - regressions:

   - sched: act_ct: additional checks for outdated flows

   - tcp: do not leave an empty skb in write queue

   - tcp: fix wrong RTO timeout when received SACK reneging

   - wifi: cfg80211: pass correct pointer to rdev_inform_bss()

   - eth: i40e: sync next_to_clean and next_to_process for programming
     status desc

   - eth: iavf: initialize waitqueues before starting watchdog_task

  Previous releases - always broken:

   - eth: r8169: fix data-races

   - eth: igb: fix potential memory leak in igb_add_ethtool_nfc_entry

   - eth: r8152: avoid writing garbage to the adapter's registers

   - eth: gtp: fix fragmentation needed check with gso"

* tag 'net-6.6-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (43 commits)
  iavf: in iavf_down, disable queues when removing the driver
  vsock/virtio: initialize the_virtio_vsock before using VQs
  net: ipv6: fix typo in comments
  net: ipv4: fix typo in comments
  net/sched: act_ct: additional checks for outdated flows
  netfilter: flowtable: GC pushes back packets to classic path
  i40e: Fix wrong check for I40E_TXR_FLAGS_WB_ON_ITR
  gtp: fix fragmentation needed check with gso
  gtp: uapi: fix GTPA_MAX
  Fix NULL pointer dereference in cn_filter()
  sfc: cleanup and reduce netlink error messages
  net/handshake: fix file ref count in handshake_nl_accept_doit()
  wifi: mac80211: don't drop all unprotected public action frames
  wifi: cfg80211: fix assoc response warning on failed links
  wifi: cfg80211: pass correct pointer to rdev_inform_bss()
  isdn: mISDN: hfcsusb: Spelling fix in comment
  tcp: fix wrong RTO timeout when received SACK reneging
  r8152: Block future register access if register access fails
  r8152: Rename RTL8152_UNPLUG to RTL8152_INACCESSIBLE
  r8152: Check for unplug in r8153b_ups_en() / r8153c_ups_en()
  ...
commit c17cda15cc
Author: Linus Torvalds
Date:   2023-10-26 07:41:27 -10:00

34 changed files with 458 additions and 214 deletions

View File

@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
enum proc_cn_mcast_op mc_op;
uintptr_t val;
if (!dsk || !data)
if (!dsk || !dsk->sk_user_data || !data)
return 0;
ptr = (__u32 *)data;

View File

@ -678,7 +678,7 @@ ph_state(struct dchannel *dch)
}
/*
* disable/enable BChannel for desired protocoll
* disable/enable BChannel for desired protocol
*/
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)

View File

@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
u32 header_len = ADIN1110_RD_HEADER_LEN;
struct spi_transfer t;
struct spi_transfer t = {0};
u32 frame_size_no_fcs;
struct sk_buff *rxb;
u32 frame_size;

View File

@ -2170,7 +2170,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
.of_match_table = of_match_ptr(xgene_enet_of_match),
.of_match_table = xgene_enet_of_match,
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,

View File

@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
if (ret)
return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.

View File

@ -580,7 +580,6 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@ -603,6 +602,7 @@ struct i40e_pf {
* in abilities field of i40e_aq_set_phy_config structure
*/
#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;

View File

@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
rx_buffer = i40e_rx_bi(rx_ring, ntp);
i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
/* Update ntc and bump cleaned count if not in the
* middle of mb packet.
*/
if (rx_ring->next_to_clean == ntp) {
rx_ring->next_to_clean =
rx_ring->next_to_process;
cleaned_count++;
}
continue;
}
@ -2847,7 +2854,7 @@ tx_only:
return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might

View File

@ -437,12 +437,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
struct xdp_buff *first = NULL;
u32 count = rx_ring->count;
struct bpf_prog *xdp_prog;
u32 entries_to_alloc;
bool failure = false;
u16 cleaned_count;
if (next_to_process != next_to_clean)
first = *i40e_rx_bi(rx_ring, next_to_clean);
@ -475,7 +475,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
qword);
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
next_to_process = (next_to_process + 1) & count_mask;
if (++next_to_process == count)
next_to_process = 0;
continue;
}
@ -493,7 +494,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
break;
next_to_process = (next_to_process + 1) & count_mask;
if (++next_to_process == count)
next_to_process = 0;
if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
@ -513,10 +515,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
rx_ring->next_to_clean = next_to_clean;
rx_ring->next_to_process = next_to_process;
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@ -752,14 +754,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
while (ntc != ntu) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
ntc++;
if (ntc >= rx_ring->count)
ntc = 0;
}
}
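
The hunks above replace mask-based ring-index wrapping with an explicit
compare-and-reset. A minimal standalone sketch of the two styles (names
are illustrative, not taken from the driver): the mask form silently
assumes the ring size is a power of two, while the compare form is safe
for any count.

	/* Sketch: two ways to advance a ring index past the end. */
	static inline u16 wrap_pow2(u16 idx, u16 count)
	{
		return (idx + 1) & (count - 1);	/* valid only if count == 2^n */
	}

	static inline u16 wrap_any(u16 idx, u16 count)
	{
		return ++idx == count ? 0 : idx;	/* valid for any count */
	}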

View File

@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->finish_config, iavf_finish_config);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
return 0;
err_ioremap:

View File

@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
if (err)
goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
err_out_input_filter:
igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:

View File

@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
u32 advertising;
u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
* We have to check this and convert it to ADVERTISE_2500_FULL
* (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
*/
if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
advertising |= ADVERTISE_2500_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
2500baseT_Full))
advertised |= ADVERTISE_2500_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
1000baseT_Full))
advertised |= ADVERTISE_1000_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
100baseT_Full))
advertised |= ADVERTISE_100_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
100baseT_Half))
advertised |= ADVERTISE_100_HALF;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
10baseT_Full))
advertised |= ADVERTISE_10_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
10baseT_Half))
advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
hw->phy.autoneg_advertised = advertising;
hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {

View File

@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
unsigned int entry = dirty_tx % NUM_TX_DESC;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* If skb is NULL then we come here again once a tx irq is
* triggered after the last fragment is marked transmitted.
*/
if (tp->cur_tx != dirty_tx && skb)
if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
rtl8169_doorbell(tp);
}
}
@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
dma_addr_t addr;
u32 status;
status = le32_to_cpu(desc->opts1);
status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;
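
The r8169 hunks annotate lockless reads of descriptor words and cur_tx
with READ_ONCE(). A minimal sketch of the pairing such annotations rely
on (hypothetical variable, not driver code; READ_ONCE()/WRITE_ONCE()
come from <linux/compiler.h>): the writer publishes with WRITE_ONCE()
so the lockless reader cannot observe a torn or compiler-cached value.

	static u32 ring_head;	/* written in the xmit path, read in completion */

	static void publish_head(u32 new_head)
	{
		WRITE_ONCE(ring_head, new_head);	/* single untorn store */
	}

	static u32 snapshot_head(void)
	{
		return READ_ONCE(ring_head);		/* fresh, untorn load */
	}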

View File

@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
}
if (child_ip_tos_mask != old->child_ip_tos_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
"Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x",
child_ip_tos_mask,
old->child_ip_tos_mask);
return -EEXIST;
}
if (child_udp_sport_mask != old->child_udp_sport_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
"Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x",
child_udp_sport_mask,
old->child_udp_sport_mask);
return -EEXIST;
@ -1081,7 +1081,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement ttl twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@ -1106,7 +1106,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement hoplimit twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@ -1120,7 +1120,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
}
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: ttl add action type %x %x %x/%x",
"ttl add action type %x %x %x/%x is not supported",
fa->mangle.htype, fa->mangle.offset,
fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
@ -1164,7 +1164,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 0:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: mask (%#x) of eth.dst32 mangle",
"mask (%#x) of eth.dst32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@ -1184,7 +1184,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->dst_mac_16 = 1;
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
"mask (%#x) of eth+4 mangle is not high or low 16b",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@ -1192,7 +1192,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 8:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: mask (%#x) of eth.src32 mangle",
"mask (%#x) of eth.src32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@ -1201,7 +1201,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->src_mac_32 = 1;
return efx_tc_complete_mac_mangle(efx, act, mung, extack);
default:
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported",
fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
}
@ -1217,7 +1217,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
"mask (%#x) out of range, only support mangle action on ipv4.ttl",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@ -1227,7 +1227,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
"only support mangle ttl when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@ -1237,7 +1237,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported: we cannot decrement ttl past 0");
"decrement ttl past 0 is not supported");
return -EOPNOTSUPP;
}
@ -1245,7 +1245,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported: multiple dec ttl");
"multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@ -1259,7 +1259,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: only support mangle on the ttl field (offset is %u)",
"only support mangle on the ttl field (offset is %u)",
fa->mangle.offset);
return -EOPNOTSUPP;
}
@ -1275,7 +1275,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
"mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
fa->mangle.mask);
return -EOPNOTSUPP;
@ -1286,7 +1286,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
"only support hop_limit when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@ -1296,7 +1296,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported: we cannot decrement hop_limit past 0");
"decrementing hop_limit past 0 is not supported");
return -EOPNOTSUPP;
}
@ -1304,7 +1304,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported: multiple dec ttl");
"multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@ -1318,7 +1318,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
"Unsupported: only support mangle on the hop_limit field");
"only support mangle on the hop_limit field");
return -EOPNOTSUPP;
}
default:

View File

@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
/* Half-Duplex can only work with single tx queue */
if (priv->plat->tx_queues_to_use > 1)
priv->phylink_config.mac_capabilities &=
~(MAC_10HD | MAC_100HD | MAC_1000HD);
else
priv->phylink_config.mac_capabilities |=
(MAC_10HD | MAC_100HD | MAC_1000HD);
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
MAC_10FD | MAC_100FD |
MAC_1000FD;
/* Half-Duplex can only work with single queue */
if (priv->plat->tx_queues_to_use <= 1)
priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
MAC_1000HD;
stmmac_set_half_duplex(priv);
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
@ -7172,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
stmmac_set_half_duplex(priv);
stmmac_napi_add(dev);
if (netif_running(dev))

View File

@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
/* reques to change default key index */
/* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;

View File

@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
if (iph->frag_off & htons(IP_DF) &&
((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));

View File

@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
char debugfs_dir_name[DNAME_INLINE_LEN + 1];
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
"adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);

View File

@ -764,7 +764,7 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
RTL8152_INACCESSIBLE = 0,
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
@ -773,6 +773,9 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
RX_EPROTO,
IN_PRE_RESET,
PROBED_WITH_NO_ERRORS,
PROBE_SHOULD_RETRY,
};
#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
@ -953,6 +956,8 @@ struct r8152 {
u8 version;
u8 duplex;
u8 autoneg;
unsigned int reg_access_reset_count;
};
/**
@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
/* If register access fails then we block access and issue a reset. If this
* happens too many times in a row without a successful access then we stop
* trying to reset and just leave access blocked.
*/
#define REGISTER_ACCESS_MAX_RESETS 3
static void rtl_set_inaccessible(struct r8152 *tp)
{
set_bit(RTL8152_INACCESSIBLE, &tp->flags);
smp_mb__after_atomic();
}
static void rtl_set_accessible(struct r8152 *tp)
{
clear_bit(RTL8152_INACCESSIBLE, &tp->flags);
smp_mb__after_atomic();
}
static
int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request,
__u8 requesttype, __u16 value, __u16 index, void *data,
__u16 size, const char *msg_tag)
{
struct usb_device *udev = tp->udev;
int ret;
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
ret = usb_control_msg(udev, pipe, request, requesttype,
value, index, data, size,
USB_CTRL_GET_TIMEOUT);
/* No need to issue a reset to report an error if the USB device got
* unplugged; just return immediately.
*/
if (ret == -ENODEV)
return ret;
/* If the write was successful then we're done */
if (ret >= 0) {
tp->reg_access_reset_count = 0;
return ret;
}
dev_err(&udev->dev,
"Failed to %s %d bytes at %#06x/%#06x (%d)\n",
msg_tag, size, value, index, ret);
/* Block all future register access until we reset. Much of the code
* in the driver doesn't check for errors. Notably, many parts of the
* driver do a read/modify/write of a register value without
* confirming that the read succeeded. Writing back modified garbage
* like this can fully wedge the adapter, requiring a power cycle.
*/
rtl_set_inaccessible(tp);
/* If probe hasn't yet finished, then we'll request a retry of the
* whole probe routine if we get any control transfer errors. We
* never have to clear this bit since we free/reallocate the whole "tp"
* structure if we retry probe.
*/
if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
set_bit(PROBE_SHOULD_RETRY, &tp->flags);
return ret;
}
/* Failing to access registers in pre-reset is not surprising since we
* wouldn't be resetting if things were behaving normally. The register
* access we do in pre-reset isn't truly mandatory--we're just reusing
* the disable() function and trying to be nice by powering the
* adapter down before resetting it. Thus, if we're in pre-reset,
* we'll return right away and not try to queue up yet another reset.
* We know the post-reset is already coming.
*/
if (test_bit(IN_PRE_RESET, &tp->flags))
return ret;
if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
usb_queue_reset_device(tp->intf);
tp->reg_access_reset_count++;
} else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
dev_err(&udev->dev,
"Tried to reset %d times; giving up.\n",
REGISTER_ACCESS_MAX_RESETS);
}
return ret;
}
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, "read");
if (ret < 0)
memset(data, 0xff, size);
else
@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, 500);
ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, "write");
kfree(tmp);
@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static void rtl_set_unplug(struct r8152 *tp)
{
if (tp->udev->state == USB_STATE_NOTATTACHED) {
set_bit(RTL8152_UNPLUG, &tp->flags);
smp_mb__after_atomic();
}
if (tp->udev->state == USB_STATE_NOTATTACHED)
rtl_set_inaccessible(tp);
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
u16 limit = 64;
int ret = 0;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 byteen_start, byteen_end, byen;
u16 limit = 512;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
struct r8152 *tp = netdev_priv(netdev);
int ret;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
if (phy_id != R8152_PHY_ID)
@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (phy_id != R8152_PHY_ID)
@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
if (!tp)
return;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!skb_queue_empty(&tp->tx_queue))
@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
switch (status) {
@ -2615,7 +2709,7 @@ static void bottom_half(struct tasklet_struct *t)
{
struct r8152 *tp = from_tasklet(tp, t, tx_tl);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@ -2658,7 +2752,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
int ret;
/* The rx would be stopped, so skip submitting */
if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
!test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
return 0;
@ -3058,7 +3152,7 @@ static int rtl_enable(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@ -3145,7 +3239,7 @@ static int rtl8153_enable(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@ -3177,7 +3271,7 @@ static void rtl_disable(struct r8152 *tp)
u32 ocp_data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@ -3631,7 +3725,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@ -3663,6 +3757,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@ -3703,6 +3799,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@ -4046,6 +4144,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
for (i = 0; wait && i < 5000; i++) {
u32 ocp_data;
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
usleep_range(1000, 2000);
ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
if ((ocp_data & PATCH_READY) ^ check)
@ -6002,7 +6103,7 @@ static int rtl8156_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
r8156_fc_parameter(tp);
@ -6060,7 +6161,7 @@ static int rtl8156b_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@ -6246,7 +6347,7 @@ out:
static void rtl8152_up(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8152_aldps_en(tp, false);
@ -6256,7 +6357,7 @@ static void rtl8152_up(struct r8152 *tp)
static void rtl8152_down(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@ -6271,7 +6372,7 @@ static void rtl8153_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@ -6311,7 +6412,7 @@ static void rtl8153_down(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@ -6332,7 +6433,7 @@ static void rtl8153b_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@ -6356,7 +6457,7 @@ static void rtl8153b_down(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@ -6393,7 +6494,7 @@ static void rtl8153c_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@ -6474,7 +6575,7 @@ static void rtl8156_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@ -6547,7 +6648,7 @@ static void rtl8156_down(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@ -6685,7 +6786,7 @@ static void rtl_work_func_t(struct work_struct *work)
/* If the device is unplugged or !netif_running(), the workqueue
* doesn't need to wake the device, and could return directly.
*/
if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@ -6724,7 +6825,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@ -6851,7 +6952,7 @@ static int rtl8152_close(struct net_device *netdev)
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
rtl_stop_rx(tp);
} else {
@ -6884,7 +6985,7 @@ static void r8152b_init(struct r8152 *tp)
u32 ocp_data;
u16 data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
data = r8152_mdio_read(tp, MII_BMCR);
@ -6928,7 +7029,7 @@ static void r8153_init(struct r8152 *tp)
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@ -6939,7 +7040,7 @@ static void r8153_init(struct r8152 *tp)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@ -7068,7 +7169,7 @@ static void r8153b_init(struct r8152 *tp)
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@ -7079,7 +7180,7 @@ static void r8153b_init(struct r8152 *tp)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@ -7150,7 +7251,7 @@ static void r8153c_init(struct r8152 *tp)
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@ -7170,7 +7271,7 @@ static void r8153c_init(struct r8152 *tp)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@ -7999,7 +8100,7 @@ static void r8156_init(struct r8152 *tp)
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@ -8020,7 +8121,7 @@ static void r8156_init(struct r8152 *tp)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@ -8095,7 +8196,7 @@ static void r8156b_init(struct r8152 *tp)
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@ -8129,7 +8230,7 @@ static void r8156b_init(struct r8152 *tp)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@ -8255,7 +8356,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
if (!tp)
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
netdev = tp->netdev;
@ -8270,7 +8371,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
set_bit(IN_PRE_RESET, &tp->flags);
tp->rtl_ops.disable(tp);
clear_bit(IN_PRE_RESET, &tp->flags);
mutex_unlock(&tp->control);
}
@ -8283,9 +8386,11 @@ static int rtl8152_post_reset(struct usb_interface *intf)
struct net_device *netdev;
struct sockaddr sa;
if (!tp)
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
rtl_set_accessible(tp);
/* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
@ -9158,7 +9263,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *data = if_mii(rq);
int res;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
res = usb_autopm_get_interface(tp->intf);
@ -9260,7 +9365,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
static void rtl8152_unload(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (tp->version != RTL_VER_01)
@ -9269,7 +9374,7 @@ static void rtl8152_unload(struct r8152 *tp)
static void rtl8153_unload(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_power_cut_en(tp, false);
@ -9277,7 +9382,7 @@ static void rtl8153_unload(struct r8152 *tp)
static void rtl8153b_unload(struct r8152 *tp)
{
if (test_bit(RTL8152_UNPLUG, &tp->flags))
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_power_cut_en(tp, false);
@ -9487,16 +9592,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
__le32 *tmp;
u8 version;
int ret;
int i;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return 0;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret > 0)
ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
/* Retry up to 3 times in case there is a transitory error. We do this
* since retrying a read of the version is always safe and this
* function doesn't take advantage of r8152_control_msg().
*/
for (i = 0; i < 3; i++) {
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
USB_CTRL_GET_TIMEOUT);
if (ret > 0) {
ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
break;
}
}
if (i != 0 && ret > 0)
dev_warn(&udev->dev, "Needed %d retries to read version\n", i);
kfree(tmp);
@ -9595,25 +9713,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
return 0;
}
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
static int rtl8152_probe_once(struct usb_interface *intf,
const struct usb_device_id *id, u8 version)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
u8 version;
int ret;
if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
return -ENODEV;
if (!rtl_check_vendor_ok(intf))
return -ENODEV;
version = rtl8152_get_version(intf);
if (version == RTL_VER_UNKNOWN)
return -ENODEV;
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@ -9776,18 +9883,68 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
/* If we saw a control transfer error while probing then we may
* want to try probe() again. Consider this an error.
*/
if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
goto out2;
set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out2:
unregister_netdev(netdev);
out1:
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
if (tp->rtl_ops.unload)
tp->rtl_ops.unload(tp);
rtl8152_release_firmware(tp);
usb_set_intfdata(intf, NULL);
out:
if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
ret = -EAGAIN;
free_netdev(netdev);
return ret;
}
#define RTL8152_PROBE_TRIES 3
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
u8 version;
int ret;
int i;
if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
return -ENODEV;
if (!rtl_check_vendor_ok(intf))
return -ENODEV;
version = rtl8152_get_version(intf);
if (version == RTL_VER_UNKNOWN)
return -ENODEV;
for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
ret = rtl8152_probe_once(intf, id, version);
if (ret != -EAGAIN)
break;
}
if (ret == -EAGAIN) {
dev_err(&intf->dev,
"r8152 failed probe after %d tries; giving up\n", i);
return -ENODEV;
}
return ret;
}
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
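
The long comments in this file describe the policy the rework adds: any
failed control transfer marks the adapter inaccessible, and at most
REGISTER_ACCESS_MAX_RESETS USB resets are queued before giving up.
Reduced to a sketch (struct and helper names are hypothetical, not
driver code):

	#define MAX_RESETS 3

	struct dev_state {
		bool blocked;			/* gate for all register access */
		unsigned int reset_count;	/* consecutive failed-access resets */
	};

	static int guarded_access_result(struct dev_state *s, int err)
	{
		if (err >= 0) {
			s->reset_count = 0;	/* success re-arms the budget */
			return err;
		}
		s->blocked = true;	/* stop read-modify-write of garbage */
		if (s->reset_count < MAX_RESETS) {
			s->reset_count++;
			request_device_reset(s);	/* hypothetical */
		}
		return err;
	}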

View File

@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (ret < 0) {
if (ret < 4) {
ret = ret < 0 ? ret : -ENODATA;
if (ret != -ENODEV)
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);

View File

@ -4355,6 +4355,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
}
/**
* ieee80211_is_protected_dual_of_public_action - check if skb contains a
* protected dual of public action management frame
* @skb: the skb containing the frame, length will be checked
*
* Return: true if the skb contains a protected dual of public action
* management frame, false otherwise.
*/
static inline bool
ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
{
u8 action;
if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
return false;
action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
action != WLAN_PUB_ACTION_MSMT_PILOT &&
action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
action != WLAN_PUB_ACTION_FTM_REQUEST &&
action != WLAN_PUB_ACTION_FTM_RESPONSE &&
action != WLAN_PUB_ACTION_FILS_DISCOVERY;
}
/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame

View File

@ -53,6 +53,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);

View File

@ -33,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
#define GTPA_MAX (__GTPA_MAX + 1)
#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
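
Netlink attribute enums reserve a __FOO_MAX sentinel one past the last
real attribute, so the advertised maximum must be the sentinel minus
one; defining it as sentinel plus one, as the removed line did,
over-reports the valid range by two. A toy illustration of the
convention (hypothetical enum, not from the UAPI header):

	enum demo_attrs {
		DEMO_UNSPEC,	/* 0 */
		DEMO_FOO,	/* 1 */
		DEMO_BAR,	/* 2: last real attribute */
		__DEMO_MAX,	/* 3: sentinel, one past the end */
	};
	#define DEMO_MAX (__DEMO_MAX - 1)	/* 2, not (__DEMO_MAX + 1) == 4 */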

View File

@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
int max_clean = atomic_read(&tbl->gc_entries) -
READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
tbl->last_flush = jiffies;
WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
entries >= tbl->gc_thresh3) {
gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
if (entries >= gc_thresh3 ||
(entries >= READ_ONCE(tbl->gc_thresh2) &&
time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
tbl->last_rand = jiffies;
WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
NDTA_PAD) ||
nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
long flush_delta = now - tbl->last_flush;
long rand_delta = now - tbl->last_rand;
long flush_delta = now - READ_ONCE(tbl->last_flush);
long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
.ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock();
@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
ndst.ndts_allocs += st->allocs;
ndst.ndts_destroys += st->destroys;
ndst.ndts_hash_grows += st->hash_grows;
ndst.ndts_res_failed += st->res_failed;
ndst.ndts_lookups += st->lookups;
ndst.ndts_hits += st->hits;
ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
ndst.ndts_table_fulls += st->table_fulls;
ndst.ndts_allocs += READ_ONCE(st->allocs);
ndst.ndts_destroys += READ_ONCE(st->destroys);
ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
ndst.ndts_res_failed += READ_ONCE(st->res_failed);
ndst.ndts_lookups += READ_ONCE(st->lookups);
ndst.ndts_hits += READ_ONCE(st->hits);
ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;

View File

@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
}
EXPORT_SYMBOL(handshake_genl_put);
/*
* dup() a kernel socket for use as a user space file descriptor
* in the current process. The kernel socket must have an
* instatiated struct file.
*
* Implicit argument: "current()"
*/
static int handshake_dup(struct socket *sock)
{
struct file *file;
int newfd;
file = get_file(sock->file);
newfd = get_unused_fd_flags(O_CLOEXEC);
if (newfd < 0) {
fput(file);
return newfd;
}
fd_install(newfd, file);
return newfd;
}
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
goto out_status;
sock = req->hr_sk->sk_socket;
fd = handshake_dup(sock);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
err = fd;
goto out_complete;
}
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
fput(sock->file);
put_unused_fd(fd);
goto out_complete;
}
fd_install(fd, get_file(sock->file));
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
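
The fix above moves fd handling into the usual reserve/commit shape:
reserve the descriptor number first, then take and install the file
reference only after every fallible step has succeeded, so the error
path needs put_unused_fd() rather than fput() on a reference that was
never taken. A condensed sketch of that pattern (the fallible helper is
hypothetical):

	static int export_sock_fd(struct socket *sock)
	{
		int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve the number */

		if (fd < 0)
			return fd;
		if (fallible_step(sock) < 0) {		/* hypothetical */
			put_unused_fd(fd);	/* nothing installed yet */
			return -EIO;
		}
		fd_install(fd, get_file(sock->file));	/* take ref and commit */
		return fd;
	}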

View File

@ -786,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
* advertize the change to the keying daemon.
* advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/

View File

@ -927,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
/* In some cases, both sendmsg() could have added an skb to the write queue,
* but failed adding payload on it. We need to remove it to consume less
/* In some cases, sendmsg() could have added an skb to the write queue,
* but failed adding payload on it. We need to remove it to consume less
* memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
* epoll() users.
* epoll() users. Another reason is that tcp_write_xmit() does not like
* finding an empty skb in the write queue.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
@ -1289,6 +1290,7 @@ new_segment:
wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
tcp_remove_empty_skb(sk);
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);

View File

@ -2207,16 +2207,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
if (flag & FLAG_SACK_RENEGING &&
flag & FLAG_SND_UNA_ADVANCED) {
if (*ack_flag & FLAG_SACK_RENEGING &&
*ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
*ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@ -2986,7 +2987,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
if (tcp_check_sack_reneging(sk, flag))
if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */

View File

@ -833,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
* advertize the change to the keying daemon.
* advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/

View File

@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
/* drop unicast public action frames when using MPF */
if (is_unicast_ether_addr(mgmt->da) &&
ieee80211_is_public_action((void *)rx->skb->data,
rx->skb->len))
ieee80211_is_protected_dual_of_public_action(rx->skb))
return -EACCES;
}

View File

@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
static bool nf_flow_is_outdated(const struct flow_offload *flow)
{
return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
const struct flow_offload *flow)
{
return flow_table->type->gc && flow_table->type->gc(flow);
}
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
nf_flow_is_outdated(flow))
nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {

View File

@ -278,7 +278,16 @@ err_nat:
return err;
}
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
!test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
!test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
static struct nf_flowtable_type flowtable_ct = {
.gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.owner = THIS_MODULE,
};

View File

@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
virtio_device_ready(vdev);
return 0;
}
static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
return 0;
/* virtio_transport_send_pkt() can queue packets once
* the_virtio_vsock is set, but they won't be processed until
* vsock->tx_run is set to true. We queue vsock->send_pkt_work
* when initialization finishes to send those packets queued
* earlier.
* We don't need to queue the other workers (rx, event) because
* as long as we don't fill the queues with empty buffers, the
* host can't send us any notification.
*/
queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
virtio_vsock_vqs_start(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
virtio_vsock_vqs_start(vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
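
The vsock change splits VQ setup from worker start-up: the transport
pointer is published first, and packets queued in that window sit idle
until tx_run flips to true, so the work item is kicked once at the end
of start-up to drain them. The general shape, reduced to a sketch
(illustrative types and names, not transport code):

	struct xport {
		struct mutex lock;
		bool running;			/* gate checked by the worker */
		struct work_struct work;	/* drains the submit queue */
	};

	static void xport_start(struct xport *x)
	{
		mutex_lock(&x->lock);
		x->running = true;	/* submissions may now be processed */
		mutex_unlock(&x->lock);

		/* anything queued before 'running' was set sat idle; kick once */
		schedule_work(&x->work);
	}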

View File

@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
cr.links[link_id].status = data->links[link_id].status;
cr.links[link_id].bss = data->links[link_id].bss;
WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
(!cr.ap_mld_addr || !cr.links[link_id].bss));
cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
cr.links[link_id].bssid = data->links[link_id].bss->bssid;

View File

@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!res)
goto drop;
rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
if (data->bss_source == BSS_SOURCE_MBSSID) {
/* this is a nontransmitting bss, we need to add it to