
Merge tag 'net-6.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "No contributions from subtrees.

  Current release - new code bugs:

   - eth: mlx5: HWS, don't destroy more bwc queue locks than allocated

  Previous releases - regressions:

   - ipv4: give an IPv4 dev to blackhole_netdev

   - udp: compute L4 checksum as usual when not segmenting the skb

   - tcp/dccp: don't use timer_pending() in reqsk_queue_unlink().

   - eth: mlx5e: don't call cleanup on profile rollback failure

   - eth: microchip: vcap api: fix memory leaks in
     vcap_api_encode_rule_test()

   - eth: enetc: disable Tx BD rings after they are empty

   - eth: macb: avoid 20s boot delay by skipping MDIO bus registration
     for fixed-link PHY

  Previous releases - always broken:

   - posix-clock: fix missing timespec64 check in pc_clock_settime()

   - genetlink: hold RCU in genlmsg_mcast()

   - mptcp: prevent MPC handshake on port-based signal endpoints

   - eth: vmxnet3: fix packet corruption in vmxnet3_xdp_xmit_frame

   - eth: stmmac: dwmac-tegra: fix link bring-up sequence

   - eth: bcmasp: fix potential memory leak in bcmasp_xmit()

  Misc:

   - add Andrew Lunn as a co-maintainer of all networking drivers"

* tag 'net-6.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (47 commits)
  net/mlx5e: Don't call cleanup on profile rollback failure
  net/mlx5: Unregister notifier on eswitch init failure
  net/mlx5: Fix command bitmask initialization
  net/mlx5: Check for invalid vector index on EQ creation
  net/mlx5: HWS, use lock classes for bwc locks
  net/mlx5: HWS, don't destroy more bwc queue locks than allocated
  net/mlx5: HWS, fixed double free in error flow of definer layout
  net/mlx5: HWS, removed wrong access to a number of rules variable
  mptcp: pm: fix UaF read in mptcp_pm_nl_rm_addr_or_subflow
  net: ethernet: mtk_eth_soc: fix memory corruption during fq dma init
  vmxnet3: Fix packet corruption in vmxnet3_xdp_xmit_frame
  net: dsa: vsc73xx: fix reception from VLAN-unaware bridges
  net: ravb: Only advertise Rx/Tx timestamps if hardware supports it
  net: microchip: vcap api: Fix memory leaks in vcap_api_encode_rule_test()
  net: phy: mdio-bcm-unimac: Add BCM6846 support
  dt-bindings: net: brcm,unimac-mdio: Add bcm6846-mdio
  udp: Compute L4 checksum as usual when not segmenting the skb
  genetlink: hold RCU in genlmsg_mcast()
  net: dsa: mv88e6xxx: Fix the max_vid definition for the MV88E6361
  tcp/dccp: Don't use timer_pending() in reqsk_queue_unlink().
  ...
Author: Linus Torvalds
Date:   2024-10-17 09:31:18 -07:00
Commit: 07d6bf634b

53 changed files with 354 additions and 182 deletions

Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml

@ -26,6 +26,7 @@ properties:
- brcm,asp-v2.1-mdio
- brcm,asp-v2.2-mdio
- brcm,unimac-mdio
- brcm,bcm6846-mdio
reg:
minItems: 1

MAINTAINERS

@ -16086,6 +16086,7 @@ F: include/uapi/linux/net_dropmon.h
F: net/core/drop_monitor.c
NETWORKING DRIVERS
M: Andrew Lunn <andrew+netdev@lunn.ch>
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>

drivers/net/dsa/mv88e6xxx/chip.c

@ -6347,7 +6347,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
.num_internal_phys = 5,
.internal_phys_offset = 3,
.max_vid = 4095,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,

drivers/net/dsa/vitesse-vsc73xx-core.c

@ -851,7 +851,6 @@ static int vsc73xx_setup(struct dsa_switch *ds)
dev_info(vsc->dev, "set up the switch\n");
ds->untag_bridge_pvid = true;
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
ds->fdb_isolation = true;

drivers/net/ethernet/aeroflex/greth.c

@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
goto out;
goto len_error;
}
/* Save skb pointer. */
@ -575,6 +575,7 @@ frag_map_error:
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
len_error:
dev_kfree_skb(skb);
out:
return err;

drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c

@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Rewind so we do not have a hole */
spb_index = intf->tx_spb_index;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
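
The bcmasp hunk above, and the bcmsysport, rtsn and axienet hunks later
in this series, all plug the same TX error-path leak; a minimal sketch
of the rule they enforce (illustrative, not driver code): returning
NETDEV_TX_OK tells the stack the skb was consumed, so an error path
that bails out without freeing it leaks the buffer.

	drop:
		dev_kfree_skb_any(skb);	/* _any() is safe in atomic context */
		return NETDEV_TX_OK;	/* "consumed": the core won't requeue it */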

drivers/net/ethernet/broadcom/bcmsysport.c

@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
dev_kfree_skb_any(skb);
goto out;
}

drivers/net/ethernet/cadence/macb_main.c

@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
return ret;
}
if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
/* With fixed-link, we don't need to register the MDIO bus,
* except if we have a child named "mdio" in the device tree.
* In that case, some devices may be attached to the MACB's MDIO bus.
*/
child = of_get_child_by_name(np, "mdio");
if (child)
of_node_put(child);
else if (of_phy_is_fixed_link(np))
return macb_mii_probe(bp->dev);
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));

drivers/net/ethernet/freescale/enetc/enetc.c

@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@ -1377,6 +1378,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
return -ENETDOWN;
enetc_lock_mdio();
tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@ -1521,7 +1525,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
rx_ring->stats.xdp_drops++;
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@ -1586,6 +1589,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
rxbd = orig_rxbd;
@ -1602,6 +1606,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
break;
}
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@ -2223,16 +2233,22 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_enable_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@ -2251,16 +2267,22 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}
static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_disable_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
@ -2460,9 +2482,13 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
enetc_enable_bdrs(priv);
enetc_enable_tx_bdrs(priv);
enetc_enable_rx_bdrs(priv);
netif_tx_start_all_queues(ndev);
clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);
@ -2520,9 +2546,15 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
set_bit(ENETC_TX_DOWN, &priv->flags);
netif_tx_stop_all_queues(ndev);
enetc_disable_bdrs(priv);
enetc_disable_rx_bdrs(priv);
enetc_wait_bdrs(priv);
enetc_disable_tx_bdrs(priv);
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@ -2533,8 +2565,6 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
enetc_wait_bdrs(priv);
enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
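
Taken together, the enetc hunks above split ring enable/disable by
direction so that Tx rings are only disabled once they have drained;
the resulting enetc_stop() ordering, paraphrased from the hunks (a
sketch, not the full function):

	set_bit(ENETC_TX_DOWN, &priv->flags);	/* refuse new TX and XDP work */
	netif_tx_stop_all_queues(ndev);
	enetc_disable_rx_bdrs(priv);	/* stop RX first: no new XDP_TX frames */
	enetc_wait_bdrs(priv);		/* wait for in-flight TX BDs to drain */
	enetc_disable_tx_bdrs(priv);	/* TX rings now empty, safe to disable */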

drivers/net/ethernet/freescale/enetc/enetc.h

@ -325,6 +325,7 @@ enum enetc_active_offloads {
enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
ENETC_TX_DOWN,
};
/* interrupt coalescing modes */

drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

@ -2411,7 +2411,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
if (!(cfg & BIT_ULL(12)))
continue;
bmap |= (1 << i);
bmap |= BIT_ULL(i);
cfg &= ~BIT_ULL(12);
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
@ -2432,7 +2432,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
if (!(bmap & (1 << i)))
if (!(bmap & BIT_ULL(i)))
continue;
cfg = rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
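
A note on the BIT_ULL() conversion: the literal 1 is a 32-bit int, so
the old shift is undefined once i reaches 32 (illustrative sketch, not
from the patch):

	u64 bmap = 0;
	bmap |= (1 << 40);	/* shifting a 32-bit int by 40: undefined */
	bmap |= BIT_ULL(40);	/* 1ULL << 40: well-defined, sets bit 40 */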

drivers/net/ethernet/mediatek/mtk_eth_soc.c

@ -1171,7 +1171,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
for (i = 0; i < cnt; i++) {
for (i = 0; i < len; i++) {
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
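
The cnt to len change is a chunked-loop bound fix; assuming the usual
shape of this function (a reconstruction, names taken from the hunk),
the scratch ring is initialized MTK_FQ_DMA_LENGTH entries at a time and
the final chunk may be shorter:

	len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
	for (i = 0; i < len; i++)	/* looping up to cnt overran the chunk */
		...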

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@ -1765,6 +1765,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
@ -1776,7 +1780,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
if (!vector)
goto no_trig;
@ -2361,7 +2365,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
cmd->vars.bitmask = MLX5_CMD_MASK;
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
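
Worked numbers for the new mask, assuming log_sz = 5 (an example, not
from the patch): there are 32 command slots, max_reg_cmds = 31 regular
entries, and one slot is reserved for MANAGE_PAGES:

	old init mask: (1UL << 31) - 1       = 0x7fffffff  /* slot 31 missing */
	MLX5_CMD_MASK: (1UL << (31 + 1)) - 1 = 0xffffffff  /* all 32 slots */

Initialization and the completion-trigger path now agree on one mask.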

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@ -6509,7 +6509,9 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
priv->profile->cleanup(priv);
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);

drivers/net/ethernet/mellanox/mlx5/core/eq.c

@ -1061,6 +1061,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
struct mlx5_eq_comp *eq;
int ret = 0;
if (vecidx >= table->max_comp_eqs) {
mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
vecidx, table->max_comp_eqs);
return -EINVAL;
}
mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
}
if (err)
goto abort;
goto err_esw_enable;
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
return 0;
abort:
err_esw_enable:
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}


@ -691,7 +691,6 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
static int
hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
u32 num_of_rules;
int ret;
/* If the current matcher size is already at its max size, we can't
@ -705,8 +704,7 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* Need to check again if we really need rehash.
* If the reason for rehash was size, but not any more - skip rehash.
*/
num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
return 0;
/* Now we're done all the checking - do the rehash:


@ -46,6 +46,7 @@ struct mlx5hws_context {
struct mlx5hws_send_engine *send_queue;
size_t queues;
struct mutex *bwc_send_queue_locks; /* protect BWC queues */
struct lock_class_key *bwc_lock_class_keys;
struct list_head tbl_list;
struct mlx5hws_context_debug_info debug_info;
struct xarray peer_ctx_xa;


@ -1925,7 +1925,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
if (ret) {
mlx5hws_err(ctx, "Failed to convert items to header layout\n");
goto free_fc;
goto free_match_hl;
}
/* Find the match definer layout for header layout match union */
@ -1946,7 +1946,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
free_fc:
kfree(mt->fc);
free_match_hl:
kfree(match_hl);
return ret;
}


@ -941,14 +941,18 @@ static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
{
int bwc_queues = ctx->queues - 1;
int bwc_queues = mlx5hws_bwc_queues(ctx);
int i;
if (!mlx5hws_context_bwc_supported(ctx))
return;
for (i = 0; i < bwc_queues; i++)
for (i = 0; i < bwc_queues; i++) {
mutex_destroy(&ctx->bwc_send_queue_locks[i]);
lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
}
kfree(ctx->bwc_lock_class_keys);
kfree(ctx->bwc_send_queue_locks);
}
@ -977,10 +981,22 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
if (!ctx->bwc_send_queue_locks)
return -ENOMEM;
for (i = 0; i < bwc_queues; i++)
ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
sizeof(*ctx->bwc_lock_class_keys),
GFP_KERNEL);
if (!ctx->bwc_lock_class_keys)
goto err_lock_class_keys;
for (i = 0; i < bwc_queues; i++) {
mutex_init(&ctx->bwc_send_queue_locks[i]);
lockdep_register_key(ctx->bwc_lock_class_keys + i);
}
return 0;
err_lock_class_keys:
kfree(ctx->bwc_send_queue_locks);
return -ENOMEM;
}
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,

drivers/net/ethernet/microchip/lan743x_ptp.c

@ -401,28 +401,21 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
if (ts) {
if (ts->tv_sec > 0xFFFFFFFFLL ||
ts->tv_sec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_sec out of range, %lld\n",
ts->tv_sec);
return -ERANGE;
}
if (ts->tv_nsec >= 1000000000L ||
ts->tv_nsec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_nsec out of range, %ld\n",
ts->tv_nsec);
return -ERANGE;
}
seconds = ts->tv_sec;
nano_seconds = ts->tv_nsec;
lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
} else {
netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
return -EINVAL;
if (ts->tv_sec > 0xFFFFFFFFLL) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_sec out of range, %lld\n",
ts->tv_sec);
return -ERANGE;
}
if (ts->tv_nsec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_nsec out of range, %ld\n",
ts->tv_nsec);
return -ERANGE;
}
seconds = ts->tv_sec;
nano_seconds = ts->tv_nsec;
lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
return 0;
}

drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c

@ -31,10 +31,10 @@ static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
/* Add port to mirror (only front ports) */
static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
{
u32 val, reg = portno;
u64 reg = portno;
u32 val;
reg = portno / BITS_PER_BYTE;
val = BIT(portno % BITS_PER_BYTE);
val = BIT(do_div(reg, 32));
if (reg == 0)
return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
@ -45,10 +45,10 @@ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
/* Delete port from mirror (only front ports) */
static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
{
u32 val, reg = portno;
u64 reg = portno;
u32 val;
reg = portno / BITS_PER_BYTE;
val = BIT(portno % BITS_PER_BYTE);
val = BIT(do_div(reg, 32));
if (reg == 0)
return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
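
do_div() divides its first argument in place and returns the remainder,
which is what makes the one-liner work; a worked example (not from the
patch), for front port 40:

	u64 reg = 40;
	u32 val = BIT(do_div(reg, 32));	/* reg = 40 / 32 = 1, returns 40 % 32 = 8 */

so port 40 maps to bit 8 of the second 32-bit PROBE_PORT_CFG word. The
old code divided by BITS_PER_BYTE (8), picking the wrong register and
bit for every port above 7.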

drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c

@ -1444,6 +1444,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
KUNIT_EXPECT_EQ(test, 0, ret);
vcap_free_rule(rule);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)

drivers/net/ethernet/renesas/ravb_main.c

@ -1750,20 +1750,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
if (hw_info->gptp || hw_info->ccc_gac)
if (hw_info->gptp || hw_info->ccc_gac) {
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
info->phc_index = ptp_clock_index(priv->ptp.clock);
else
info->phc_index = 0;
}
return 0;
}

drivers/net/ethernet/renesas/rtsn.c

@ -1057,6 +1057,7 @@ static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->len >= TX_DS) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
dev_kfree_skb_any(skb);
goto out;
}

drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c

@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
usleep_range(10, 20); /* 500ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
return err;
}
usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
msleep(30); /* 30ms delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
500, 500 * 2000);

drivers/net/ethernet/xilinx/xilinx_axienet_main.c

@ -1051,6 +1051,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@ -1071,6 +1072,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
true, NULL, 0);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);

drivers/net/macsec.c

@ -154,19 +154,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}
static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
struct macsec_rx_sa *sa = NULL;
int an;
for (an = 0; an < MACSEC_NUM_AN; an++) {
sa = macsec_rxsa_get(rx_sc->sa[an]);
if (sa)
break;
}
return sa;
}
static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@ -1208,15 +1195,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_errors);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}
@ -1226,8 +1210,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}

drivers/net/mdio/mdio-bcm-unimac.c

@ -337,6 +337,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },

drivers/net/netdevsim/dev.c

@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
continue;
nsim_dev_trap_report(nsim_dev_port);
cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
static int nsim_dev_traps_init(struct devlink *devlink)
@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
return 0;

drivers/net/usb/usbnet.c

@ -1870,6 +1870,7 @@ out1:
* may trigger an error resubmitting itself and, worse,
* schedule a timer. So we kill it all just in case.
*/
usbnet_mark_going_away(dev);
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
free_netdev(net);

drivers/net/vmxnet3/vmxnet3_xdp.c

@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
tbi->dma_addr = page_pool_get_dma_addr(page) +
VMXNET3_XDP_HEADROOM;
(xdpf->data - (void *)xdpf);
dma_sync_single_for_device(&adapter->pdev->dev,
tbi->dma_addr, buf_size,
DMA_TO_DEVICE);
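
The corruption came from assuming a fixed data offset within the page;
a simplified layout sketch (illustrative, details vary per frame):

	[ struct xdp_frame | headroom ... | packet data ... ]
	^ xdpf                            ^ xdpf->data

	offset = xdpf->data - (void *)xdpf;	/* actual per-frame offset */

The fixed VMXNET3_XDP_HEADROOM constant did not match frames carrying a
different real headroom, so the DMA address pointed at the wrong bytes.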

drivers/target/target_core_user.c

@ -2130,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
}
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
TCMU_MCGRP_CONFIG);
/* Wait during an add as the listener may not be up yet */
if (ret == 0 ||

include/linux/fsl/enetc_mdio.h

@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
int devad, int regnum, u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif

include/net/genetlink.h

@ -531,13 +531,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
* @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags);
unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message

kernel/time/posix-clock.c

@ -318,6 +318,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
goto out;
}
if (!timespec64_valid_strict(ts))
return -EINVAL;
if (cd.clk->ops.clock_settime)
err = cd.clk->ops.clock_settime(cd.clk, ts);
else
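
For reference, the added check rejects (summarizing the generic
timespec64_valid_strict() semantics):

	ts->tv_sec  < 0                      /* negative seconds */
	ts->tv_nsec < 0 or >= NSEC_PER_SEC   /* out-of-range nanoseconds */
	ts->tv_sec  >= KTIME_SEC_MAX         /* would overflow ktime_t */

which also lets drivers such as lan743x (above) drop their own ad-hoc
range checks.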

net/ipv4/devinet.c

@ -298,17 +298,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
in_dev_put(in_dev);
in_dev = NULL;
goto out;
if (dev != blackhole_netdev) {
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
in_dev_put(in_dev);
in_dev = NULL;
goto out;
}
ip_mc_init_dev(in_dev);
if (dev->flags & IFF_UP)
ip_mc_up(in_dev);
}
ip_mc_init_dev(in_dev);
if (dev->flags & IFF_UP)
ip_mc_up(in_dev);
/* we can receive as soon as ip_ptr is set -- do this last */
rcu_assign_pointer(dev->ip_ptr, in_dev);
@ -347,6 +349,19 @@ static void inetdev_destroy(struct in_device *in_dev)
in_dev_put(in_dev);
}
static int __init inet_blackhole_dev_init(void)
{
int err = 0;
rtnl_lock();
if (!inetdev_init(blackhole_netdev))
err = -ENOMEM;
rtnl_unlock();
return err;
}
late_initcall(inet_blackhole_dev_init);
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
const struct in_ifaddr *ifa;

net/ipv4/inet_connection_sock.c

@ -1045,21 +1045,31 @@ static bool reqsk_queue_unlink(struct request_sock *req)
found = __sk_nulls_del_node_init_rcu(sk);
spin_unlock(lock);
}
if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
struct request_sock *req,
bool from_timer)
{
bool unlinked = reqsk_queue_unlink(req);
if (!from_timer && timer_delete_sync(&req->rsk_timer))
reqsk_put(req);
if (unlinked) {
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
reqsk_put(req);
}
return unlinked;
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
return __inet_csk_reqsk_queue_drop(sk, req, false);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
@ -1152,7 +1162,7 @@ static void reqsk_timer_handler(struct timer_list *t)
if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
/* delete timer */
inet_csk_reqsk_queue_drop(sk_listener, nreq);
__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
goto no_ownership;
}
@ -1178,7 +1188,8 @@ no_ownership:
}
drop:
inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
reqsk_put(req);
}
static bool reqsk_queue_hash_req(struct request_sock *req,
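
The core of the fix is dropping the racy timer_pending() pre-check; the
shape of the change, paraphrased from the hunks:

	if (!from_timer && timer_delete_sync(&req->rsk_timer))
		reqsk_put(req);	/* cancelled a pending expiry: drop its ref */

timer_pending() returns false while the handler is already running on
another CPU, so the old code could skip the synchronous cancel and drop
the request while the handler was still using it. from_timer is true
when called from reqsk_timer_handler() itself, where
timer_delete_sync() would deadlock waiting on the running handler.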

net/ipv4/udp.c

@ -951,8 +951,10 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
/* Don't checksum the payload, skb will get segmented */
goto csum_partial;
}
goto csum_partial;
}
if (is_udplite) /* UDP-Lite */
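
The fix narrows the "defer the checksum" path to skbs that will really
be segmented; the resulting flow in udp_send_skb(), paraphrased:

	if (cork->gso_size) {
		if (datalen > cork->gso_size) {
			/* set SKB_GSO_UDP_L4, gso_segs ... */
			goto csum_partial;	/* GSO will segment later */
		}
		/* fits in one segment: fall through, checksum as usual */
	}

The ipv6 hunk below makes the same change for udp_v6_send_skb().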

net/ipv6/udp.c

@ -1266,8 +1266,10 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
/* Don't checksum the payload, skb will get segmented */
goto csum_partial;
}
goto csum_partial;
}
if (is_udplite)

net/l2tp/l2tp_netlink.c

@ -116,7 +116,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
NLM_F_ACK, tunnel, cmd);
if (ret >= 0) {
ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
@ -144,7 +144,7 @@ static int l2tp_session_notify(struct genl_family *family,
NLM_F_ACK, session, cmd);
if (ret >= 0) {
ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;

net/mptcp/mib.c

@ -17,6 +17,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
SNMP_MIB_ITEM("MPCapableSYNTXDrop", MPTCP_MIB_MPCAPABLEACTIVEDROP),
SNMP_MIB_ITEM("MPCapableSYNTXDisabled", MPTCP_MIB_MPCAPABLEACTIVEDISABLED),
SNMP_MIB_ITEM("MPCapableEndpAttempt", MPTCP_MIB_MPCAPABLEENDPATTEMPT),
SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),

net/mptcp/mib.h

@ -12,6 +12,7 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
MPTCP_MIB_MPCAPABLEACTIVEDROP, /* Client-side fallback due to a MPC drop */
MPTCP_MIB_MPCAPABLEACTIVEDISABLED, /* Client-side disabled due to past issues */
MPTCP_MIB_MPCAPABLEENDPATTEMPT, /* Prohibited MPC to port-based endp */
MPTCP_MIB_TOKENFALLBACKINIT, /* Could not init/allocate token */
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */

net/mptcp/pm_netlink.c

@ -873,12 +873,12 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
i, rm_id, id, remote_id, msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
removed |= subflow->request_join;
/* the following takes care of updating the subflows counter */
mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
removed |= subflow->request_join;
if (rm_type == MPTCP_MIB_RMSUBFLOW)
__MPTCP_INC_STATS(sock_net(sk), rm_type);
}
@ -1121,6 +1121,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
*/
inet_sk_state_store(newsk, TCP_LISTEN);
lock_sock(ssk);
WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true);
err = __inet_listen_sk(ssk, backlog);
if (!err)
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);

net/mptcp/protocol.h

@ -535,6 +535,7 @@ struct mptcp_subflow_context {
__unused : 8;
bool data_avail;
bool scheduled;
bool pm_listener; /* a listener managed by the kernel PM? */
u32 remote_nonce;
u64 thmac;
u32 local_nonce;

net/mptcp/subflow.c

@ -132,6 +132,13 @@ static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
}
}
static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb)
{
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT);
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
return -EPERM;
}
/* Init mptcp request socket.
*
* Returns an error code if a JOIN has failed and a TCP reset
@ -165,6 +172,8 @@ static int subflow_check_req(struct request_sock *req,
if (opt_mp_capable) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
if (unlikely(listener->pm_listener))
return subflow_reset_req_endp(req, skb);
if (opt_mp_join)
return 0;
} else if (opt_mp_join) {
@ -172,6 +181,8 @@ static int subflow_check_req(struct request_sock *req,
if (mp_opt.backup)
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
} else if (unlikely(listener->pm_listener)) {
return subflow_reset_req_endp(req, skb);
}
if (opt_mp_capable && listener->request_mptcp) {
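
Together with the pm_listener flag added above, the gist of the new
gate, paraphrased from the hunks: on a listener that the in-kernel PM
created for a port-based signal endpoint, only MP_JOIN is acceptable,

	if (unlikely(listener->pm_listener))
		return subflow_reset_req_endp(req, skb);
		/* bumps MPCapableEndpAttempt, resets with EPROHIBIT */

so both plain MPC handshakes and plain TCP SYNs get reset, while the
MP_JOINs such endpoints exist to receive still pass.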

net/netlink/genetlink.c

@ -1501,15 +1501,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
if (IS_ERR(msg))
return PTR_ERR(msg);
if (!family->netnsok) {
if (!family->netnsok)
genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
0, GFP_KERNEL);
} else {
rcu_read_lock();
genlmsg_multicast_allns(&genl_ctrl, msg, 0,
0, GFP_ATOMIC);
rcu_read_unlock();
}
else
genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
return 0;
}
@ -1929,23 +1925,23 @@ problem:
core_initcall(genl_init);
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
gfp_t flags)
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
bool delivered = false;
int err;
rcu_read_lock();
for_each_net_rcu(net) {
if (prev) {
tmp = skb_clone(skb, flags);
tmp = skb_clone(skb, GFP_ATOMIC);
if (!tmp) {
err = -ENOMEM;
goto error;
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
portid, group, GFP_ATOMIC);
if (!err)
delivered = true;
else if (err != -ESRCH)
@ -1954,27 +1950,31 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
prev = net;
}
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
rcu_read_unlock();
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
return err;
return delivered ? 0 : -ESRCH;
error:
rcu_read_unlock();
kfree_skb(skb);
return err;
}
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return genlmsg_mcast(skb, portid, group, flags);
return genlmsg_mcast(skb, portid, group);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
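
Callers see this as a signature change; a minimal usage sketch (family
and group names below are made up):

	err = genlmsg_multicast_allns(&my_family, skb, 0, MY_MCGRP);
	if (err == -ESRCH)	/* no listeners: usually not an error */
		err = 0;

The GFP argument is gone because the whole net-namespace walk now runs
under rcu_read_lock(), so clones are unconditionally GFP_ATOMIC; the
l2tp, target_core_user and nl80211 hunks in this series are the
matching caller updates.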

net/smc/smc_pnet.c

@ -753,7 +753,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
write_lock(&sn->pnetids_ndev.lock);
list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
if (smc_pnet_match(pnetid, pe->pnetid)) {
if (smc_pnet_match(pnetid, pi->pnetid)) {
refcount_inc(&pi->refcnt);
kfree(pe);
goto unlock;

net/smc/smc_wr.c

@ -648,8 +648,10 @@ void smc_wr_free_link(struct smc_link *lnk)
smc_wr_tx_wait_no_pending_sends(lnk);
percpu_ref_kill(&lnk->wr_reg_refs);
wait_for_completion(&lnk->reg_ref_comp);
percpu_ref_exit(&lnk->wr_reg_refs);
percpu_ref_kill(&lnk->wr_tx_refs);
wait_for_completion(&lnk->tx_ref_comp);
percpu_ref_exit(&lnk->wr_tx_refs);
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@ -912,11 +914,13 @@ int smc_wr_create_link(struct smc_link *lnk)
init_waitqueue_head(&lnk->wr_reg_wait);
rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
if (rc)
goto dma_unmap;
goto cancel_ref;
init_completion(&lnk->reg_ref_comp);
init_waitqueue_head(&lnk->wr_rx_empty_wait);
return rc;
cancel_ref:
percpu_ref_exit(&lnk->wr_tx_refs);
dma_unmap:
if (lnk->wr_rx_v2_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,

net/wireless/nl80211.c

@ -17986,10 +17986,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
rcu_read_unlock();
NL80211_MCGRP_REGULATORY);
return;
@ -18722,10 +18720,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
rcu_read_unlock();
NL80211_MCGRP_REGULATORY);
return;


@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
import errno
import json
import os
import random

tools/testing/selftests/net/mptcp/mptcp_join.sh

@ -23,6 +23,7 @@ tmpfile=""
cout=""
err=""
capout=""
cappid=""
ns1=""
ns2=""
iptables="iptables"
@ -887,6 +888,44 @@ check_cestab()
fi
}
cond_start_capture()
{
local ns="$1"
:> "$capout"
if $capture; then
local capuser capfile
if [ -z $SUDO_USER ]; then
capuser=""
else
capuser="-Z $SUDO_USER"
fi
capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "$ns")
echo "Capturing traffic for test $MPTCP_LIB_TEST_COUNTER into $capfile"
ip netns exec "$ns" tcpdump -i any -s 65535 -B 32768 $capuser -w "$capfile" > "$capout" 2>&1 &
cappid=$!
sleep 1
fi
}
cond_stop_capture()
{
if $capture; then
sleep 1
kill $cappid
cat "$capout"
fi
}
get_port()
{
echo "$((10000 + MPTCP_LIB_TEST_COUNTER - 1))"
}
do_transfer()
{
local listener_ns="$1"
@ -894,33 +933,17 @@ do_transfer()
local cl_proto="$3"
local srv_proto="$4"
local connect_addr="$5"
local port
local port=$((10000 + MPTCP_LIB_TEST_COUNTER - 1))
local cappid
local FAILING_LINKS=${FAILING_LINKS:-""}
local fastclose=${fastclose:-""}
local speed=${speed:-"fast"}
port=$(get_port)
:> "$cout"
:> "$sout"
:> "$capout"
if $capture; then
local capuser
if [ -z $SUDO_USER ] ; then
capuser=""
else
capuser="-Z $SUDO_USER"
fi
capfile=$(printf "mp_join-%02u-%s.pcap" "$MPTCP_LIB_TEST_COUNTER" "${listener_ns}")
echo "Capturing traffic for test $MPTCP_LIB_TEST_COUNTER into $capfile"
ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
cappid=$!
sleep 1
fi
cond_start_capture ${listener_ns}
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat -n
@ -1007,10 +1030,7 @@ do_transfer()
wait $spid
local rets=$?
if $capture; then
sleep 1
kill $cappid
fi
cond_stop_capture
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat | grep Tcp > /tmp/${listener_ns}.out
@ -1026,7 +1046,6 @@ do_transfer()
ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
cat /tmp/${connector_ns}.out
cat "$capout"
return 1
fi
@ -1043,13 +1062,7 @@ do_transfer()
fi
rets=$?
if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
cat "$capout"
return 0
fi
cat "$capout"
return 1
[ $retc -eq 0 ] && [ $rets -eq 0 ]
}
make_file()
@ -2873,6 +2886,32 @@ verify_listener_events()
fail_test
}
chk_mpc_endp_attempt()
{
local retl=$1
local attempts=$2
print_check "Connect"
if [ ${retl} = 124 ]; then
fail_test "timeout on connect"
elif [ ${retl} = 0 ]; then
fail_test "unexpected successful connect"
else
print_ok
print_check "Attempts"
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPCapableEndpAttempt")
if [ -z "$count" ]; then
print_skip
elif [ "$count" != "$attempts" ]; then
fail_test "got ${count} MPC attempt[s] on port-based endpoint, expected ${attempts}"
else
print_ok
fi
fi
}
add_addr_ports_tests()
{
# signal address with port
@ -2963,6 +3002,22 @@ add_addr_ports_tests()
chk_join_nr 2 2 2
chk_add_nr 2 2 2
fi
if reset "port-based signal endpoint must not accept mpc"; then
local port retl count
port=$(get_port)
cond_start_capture ${ns1}
pm_nl_add_endpoint ${ns1} 10.0.2.1 flags signal port ${port}
mptcp_lib_wait_local_port_listen ${ns1} ${port}
timeout 1 ip netns exec ${ns2} \
./mptcp_connect -t ${timeout_poll} -p $port -s MPTCP 10.0.2.1 >/dev/null 2>&1
retl=$?
cond_stop_capture
chk_mpc_endp_attempt ${retl} 1
fi
}
syncookies_tests()


@ -14,8 +14,11 @@ import sys
import atexit
from pwd import getpwuid
from os import stat
from lib.py import ip
# Allow utils module to be imported from different directory
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(this_dir, "../"))
from lib.py.utils import ip
libc = ctypes.cdll.LoadLibrary('libc.so.6')
setns = libc.setns