Merge tag 'net-6.9-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "A little calmer than usual, probably just the timing of sub-tree PRs.

  Including fixes from netfilter.

  Current release - regressions:

   - inet: bring NLM_DONE out to a separate recv() again, fix user
     space which assumes multiple recv()s will happen and gets blocked
     forever

   - drv: mlx5:
      - restore mistakenly dropped parts in register devlink flow
      - use channel mdev reference instead of global mdev instance for
        coalescing
      - acquire RTNL lock before RQs/SQs activation/deactivation

  Previous releases - regressions:

   - net: change maximum number of UDP segments to 128, fix virtio
     compatibility with Windows peers

   - usb: ax88179_178a: avoid writing the mac address before first
     reading

  Previous releases - always broken:

   - sched: fix mirred deadlock on device recursion

   - netfilter:
      - br_netfilter: skip conntrack input hook for promisc packets
      - fixes removal of duplicate elements in the pipapo set backend
      - various fixes for abort paths and error handling

   - af_unix: don't peek OOB data without MSG_OOB

   - drv: flower: fix fragment flags handling in multiple drivers

   - drv: ravb: fix jumbo frames and packet stats accounting

  Misc:

   - kselftest_harness: fix Clang warning about zero-length format

   - tun: limit printing rate when illegal packet received by tun dev"

* tag 'net-6.9-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (46 commits)
  net: ethernet: ti: am65-cpsw-nuss: cleanup DMA Channels before using them
  net: usb: ax88179_178a: avoid writing the mac address before first reading
  net: ravb: Fix RX byte accounting for jumbo packets
  net: ravb: Fix GbEth jumbo packet RX checksum handling
  net: ravb: Allow RX loop to move past DMA mapping errors
  net: ravb: Count packets instead of descriptors in R-Car RX path
  net: ethernet: mtk_eth_soc: fix WED + wifi reset
  net:usb:qmi_wwan: support Rolling modules
  selftests: kselftest_harness: fix Clang warning about zero-length format
  net/sched: Fix mirred deadlock on device recursion
  netfilter: nf_tables: fix memleak in map from abort path
  netfilter: nf_tables: restore set elements when delete set fails
  netfilter: nf_tables: missing iterator type in lookup walk
  s390/ism: Properly fix receive message buffer allocation
  net: dsa: mt7530: fix port mirroring for MT7988 SoC switch
  net: dsa: mt7530: fix mirroring frames received on local port
  tun: limit printing rate when illegal packet received by tun dev
  ice: Fix checking for unsupported keys on non-tunnel device
  ice: tc: allow zero flags in parsing tc flower
  ice: tc: check src_vsi in case of traffic from VF
  ...

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 7586c8501d
@@ -1883,14 +1883,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
 
 static int mt753x_mirror_port_get(unsigned int id, u32 val)
 {
-    return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
-                   MIRROR_PORT(val);
+    return (id == ID_MT7531 || id == ID_MT7988) ?
+               MT7531_MIRROR_PORT_GET(val) :
+               MIRROR_PORT(val);
 }
 
 static int mt753x_mirror_port_set(unsigned int id, u32 val)
 {
-    return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
-                   MIRROR_PORT(val);
+    return (id == ID_MT7531 || id == ID_MT7988) ?
+               MT7531_MIRROR_PORT_SET(val) :
+               MIRROR_PORT(val);
 }
 
 static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2480,6 +2482,9 @@ mt7530_setup(struct dsa_switch *ds)
                PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
     }
 
+    /* Allow mirroring frames received on the local port (monitor port). */
+    mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
     /* Setup VLAN ID 0 for VLAN-unaware bridges */
     ret = mt7530_setup_vlan0(priv);
     if (ret)
@@ -2591,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
                PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
     }
 
+    /* Allow mirroring frames received on the local port (monitor port). */
+    mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
     /* Flush the FDB table */
     ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
     if (ret < 0)
@@ -32,6 +32,10 @@ enum mt753x_id {
 #define SYSC_REG_RSTCTRL        0x34
 #define  RESET_MCM          BIT(2)
 
+/* Register for ARL global control */
+#define MT753X_AGC          0xc
+#define  LOCAL_EN           BIT(7)
+
 /* Registers to mac forward control for unknown frames */
 #define MT7530_MFC          0x10
 #define  BC_FFP(x)          (((x) & 0xff) << 24)
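Why the mt7530 helpers now treat MT7988 like MT7531: the two chips share the newer mirror-port register layout, so MT7988 must take the MT7531 accessor branch. A minimal user-space sketch of that dispatch; the macro values below are simplified stand-ins, not the driver's real register definitions:

    /* Build: cc -o mirror mirror.c && ./mirror */
    #include <stdio.h>

    enum mt753x_id { ID_MT7530, ID_MT7621, ID_MT7531, ID_MT7988 };

    #define MIRROR_PORT(x)            ((x) & 0x7)         /* MT7530-style field (stand-in) */
    #define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & 0x7) /* MT7531/MT7988 field (stand-in) */

    static int mt753x_mirror_port_get(unsigned int id, unsigned int val)
    {
        return (id == ID_MT7531 || id == ID_MT7988) ?
            MT7531_MIRROR_PORT_GET(val) : MIRROR_PORT(val);
    }

    int main(void)
    {
        unsigned int val = 0x50004; /* port 5 in the new field, port 4 in the old one */

        printf("MT7531: %d\n", mt753x_mirror_port_get(ID_MT7531, val)); /* 5 */
        printf("MT7988: %d\n", mt753x_mirror_port_get(ID_MT7988, val)); /* 5, after the fix */
        printf("MT7530: %d\n", mt753x_mirror_port_get(ID_MT7530, val)); /* 4 */
        return 0;
    }

Before the fix, the MT7988 case fell through to the MT7530-style field and read the wrong bits.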
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
      * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
      * - Tunnel flag (present if tunnel)
      */
+    if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+        lkups_cnt++;
 
     if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
         lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
     /* Always add direction metadata */
     ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
 
+    if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+        ice_rule_add_src_vsi_metadata(&list[i]);
+        i++;
+    }
+
     rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
     if (tc_fltr->tunnel_type != TNL_LAST) {
         i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
     int ret;
     int i;
 
-    if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+    if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
         NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
         return -EOPNOTSUPP;
     }
@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 
     /* specify the cookie as filter_rule_id */
     rule_info.fltr_rule_id = fltr->cookie;
+    rule_info.src_vsi = vsi->idx;
 
     ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
     if (ret == -EEXIST) {
@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
           (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
            BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
            BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-           BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+           BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+           BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+           BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+           BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
         NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
         return -EOPNOTSUPP;
     } else {
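The invariant the ice src_vsi hunks maintain: the lookup *count* and the lookup *fill* paths must agree, so an egress filter reserves one extra slot up front and fills it with source-VSI metadata. A toy model of that count/fill pairing (names and flags are stand-ins, not the ice driver's types):

    #include <assert.h>
    #include <stdio.h>

    enum dir { EGRESS, INGRESS };

    static int count_lkups(enum dir d, unsigned flags)
    {
        int cnt = 1;            /* direction metadata, always present */

        if (d == EGRESS)
            cnt++;              /* src_vsi metadata, the new slot */
        if (flags & 0x1)        /* models e.g. the tenant-id flag */
            cnt++;
        return cnt;
    }

    static int fill_rules(enum dir d, unsigned flags, int *list, int n)
    {
        int i = 0;

        list[i++] = 'D';        /* direction metadata */
        if (d == EGRESS)
            list[i++] = 'V';    /* src_vsi metadata */
        if (flags & 0x1)
            list[i++] = 'T';
        assert(i == n);         /* count and fill must agree */
        return i;
    }

    int main(void)
    {
        int list[8];
        int n = count_lkups(EGRESS, 0x1);

        printf("filled %d lookups\n", fill_rules(EGRESS, 0x1, list, n));
        return 0;
    }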
@@ -689,6 +689,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
     if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
         struct flow_match_control match;
+        u32 val;
 
         flow_rule_match_control(rule, &match);
         if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -697,12 +698,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
         }
 
         if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+            val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
             if (ntohs(flow_spec->etype) == ETH_P_IP) {
-                flow_spec->ip_flag = IPV4_FLAG_MORE;
+                flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
                 flow_mask->ip_flag = IPV4_FLAG_MORE;
                 req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
             } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
-                flow_spec->next_header = IPPROTO_FRAGMENT;
+                flow_spec->next_header = val ?
+                             IPPROTO_FRAGMENT : 0;
                 flow_mask->next_header = 0xff;
                 req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
             } else {
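What the otx2 change corrects: the flower *key* (desired value) was ignored and only the *mask* consulted, so a rule asking for "not a fragment" (mask set, key clear) was programmed as if it matched fragments. A condensed, runnable model of key/mask matching, assuming the usual "(pkt & mask) == (key & mask)" semantics:

    #include <stdio.h>

    #define FLOW_DIS_IS_FRAGMENT 0x1 /* stand-in value for the uapi flag */

    /* A packet matches if its flags agree with the key under the mask. */
    static int matches(unsigned pkt_flags, unsigned key, unsigned mask)
    {
        return (pkt_flags & mask) == (key & mask);
    }

    int main(void)
    {
        unsigned mask = FLOW_DIS_IS_FRAGMENT;
        unsigned key_frag = FLOW_DIS_IS_FRAGMENT;  /* match fragments */
        unsigned key_nonfrag = 0;                  /* match non-fragments */

        /* Before the fix the hardware was programmed as if key == mask,
         * i.e. always the "fragments" case, breaking key_nonfrag rules.
         */
        printf("frag pkt vs frag rule:      %d\n", matches(1, key_frag, mask));
        printf("plain pkt vs nonfrag rule:  %d\n", matches(0, key_nonfrag, mask));
        printf("frag pkt vs nonfrag rule:   %d\n", matches(1, key_nonfrag, mask));
        return 0;
    }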
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
 static void
 mtk_wed_stop(struct mtk_wed_device *dev)
 {
+    mtk_wed_dma_disable(dev);
     mtk_wed_set_ext_int(dev, false);
 
     wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
     wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
     wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
     wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
-    wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
 
     if (!mtk_wed_get_rx_capa(dev))
         return;
@@ -1093,7 +1093,6 @@ static void
 mtk_wed_deinit(struct mtk_wed_device *dev)
 {
     mtk_wed_stop(dev);
-    mtk_wed_dma_disable(dev);
 
     wed_clr(dev, MTK_WED_CTRL,
         MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
 static void
 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
 {
-    if (!dev->running)
-        return;
-
     mtk_wed_set_ext_int(dev, !!mask);
     wed_w32(dev, MTK_WED_INT_MASK, mask);
 }
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
     mlx5e_reset_txqsq_cc_pc(sq);
     sq->stats->recover++;
     clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+    rtnl_lock();
     mlx5e_activate_txqsq(sq);
+    rtnl_unlock();
+
     if (sq->channel)
         mlx5e_trigger_napi_icosq(sq->channel);
     else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
     carrier_ok = netif_carrier_ok(netdev);
     netif_carrier_off(netdev);
 
+    rtnl_lock();
     mlx5e_deactivate_priv_channels(priv);
+    rtnl_unlock();
 
     mlx5e_ptp_close(chs->ptp);
     err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
 
+    rtnl_lock();
     mlx5e_activate_priv_channels(priv);
+    rtnl_unlock();
 
     /* return carrier back if needed */
     if (carrier_ok)
@@ -46,6 +46,10 @@ struct arfs_table {
     struct hlist_head    rules_hash[ARFS_HASH_SIZE];
 };
 
+enum {
+    MLX5E_ARFS_STATE_ENABLED,
+};
+
 enum arfs_type {
     ARFS_IPV4_TCP,
     ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
     spinlock_t                     arfs_lock;
     int                            last_filter_id;
     struct workqueue_struct        *wq;
+    unsigned long                  state;
 };
 
 struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
             return err;
         }
     }
+    set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
     return 0;
 }
 
@@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
     int i;
     int j;
 
+    clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
     spin_lock_bh(&arfs->arfs_lock);
     mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
         hlist_del_init(&rule->hlist);
@@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
     struct mlx5_flow_handle *rule;
 
     arfs = mlx5e_fs_get_arfs(priv->fs);
-    mutex_lock(&priv->state_lock);
-    if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-        spin_lock_bh(&arfs->arfs_lock);
-        hlist_del(&arfs_rule->hlist);
-        spin_unlock_bh(&arfs->arfs_lock);
-
-        mutex_unlock(&priv->state_lock);
-        kfree(arfs_rule);
-        goto out;
-    }
-    mutex_unlock(&priv->state_lock);
+    if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+        return;
 
     if (!arfs_rule->rule) {
         rule = arfs_add_rule(priv, arfs_rule);
@@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
         return -EPROTONOSUPPORT;
 
     spin_lock_bh(&arfs->arfs_lock);
+    if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+        spin_unlock_bh(&arfs->arfs_lock);
+        return -EPERM;
+    }
+
     arfs_rule = arfs_find_rule(arfs_t, &fk);
     if (arfs_rule) {
         if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
@@ -589,12 +589,12 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 static void
 mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
 {
-    struct mlx5_core_dev *mdev = priv->mdev;
     int tc;
     int i;
 
     for (i = 0; i < priv->channels.num; ++i) {
         struct mlx5e_channel *c = priv->channels.c[i];
+        struct mlx5_core_dev *mdev = c->mdev;
 
         for (tc = 0; tc < c->num_tc; tc++) {
             mlx5_core_modify_cq_moderation(mdev,
@@ -608,11 +608,11 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
 static void
 mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
 {
-    struct mlx5_core_dev *mdev = priv->mdev;
     int i;
 
     for (i = 0; i < priv->channels.num; ++i) {
         struct mlx5e_channel *c = priv->channels.c[i];
+        struct mlx5_core_dev *mdev = c->mdev;
 
         mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
                            coal->rx_coalesce_usecs,
@@ -209,8 +209,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
                               *data,
                               mlx5e_devcom_event_mpv,
                               priv);
-    if (IS_ERR_OR_NULL(priv->devcom))
-        return -EOPNOTSUPP;
+    if (IS_ERR(priv->devcom))
+        return PTR_ERR(priv->devcom);
 
     if (mlx5_core_is_mp_master(priv->mdev)) {
         mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -3060,7 +3060,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
                              key,
                              mlx5_esw_offloads_devcom_event,
                              esw);
-    if (IS_ERR_OR_NULL(esw->devcom))
+    if (IS_ERR(esw->devcom))
         return;
 
     mlx5_devcom_send_event(esw->devcom,
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
         return err;
     }
 
-    if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+    if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
         mlx5_lag_port_sel_destroy(ldev);
+        ldev->buckets = 1;
+    }
     if (mlx5_lag_has_drop_rule(ldev))
         mlx5_lag_drop_rule_cleanup(ldev);
 
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
     struct mlx5_devcom_comp *comp;
 
     if (IS_ERR_OR_NULL(devc))
-        return NULL;
+        return ERR_PTR(-EINVAL);
 
     mutex_lock(&comp_list_lock);
     comp = devcom_component_get(devc, id, key, handler);
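The contract change these devcom hunks track: mlx5_devcom_register_component() used to signal failure with NULL, and now returns an encoded error pointer, so every caller must switch from NULL or IS_ERR_OR_NULL() checks to IS_ERR() plus PTR_ERR(). A user-space model of the ERR_PTR encoding, assuming the kernel's trick of packing small negative errnos into the top of the pointer range:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *ok = &ok;                  /* any real pointer */
        void *bad = ERR_PTR(-22);        /* -EINVAL */

        printf("ok:  IS_ERR=%d\n", IS_ERR(ok));
        printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad));
        return 0;
    }

Mixing the two conventions is exactly the bug class being fixed: a NULL check never fires on an ERR_PTR value, and vice versa.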
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
     sd = mlx5_get_sd(dev);
     devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
                         sd->group_id, NULL, dev);
-    if (!devcom)
-        return -ENOMEM;
+    if (IS_ERR(devcom))
+        return PTR_ERR(devcom);
 
     sd->devcom = devcom;
 
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
         mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
                            mlx5_query_nic_system_image_guid(dev),
                            NULL, dev);
-    if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+    if (IS_ERR(dev->priv.hca_devcom_comp))
         mlx5_core_err(dev, "Failed to register devcom HCA component\n");
 }
 
@@ -1699,12 +1699,15 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
     err = mlx5_devlink_params_register(priv_to_devlink(dev));
     if (err) {
         mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
-        goto query_hca_caps_err;
+        goto params_reg_err;
     }
 
     devl_unlock(devlink);
     return 0;
 
+params_reg_err:
+    devl_unregister(devlink);
+    devl_unlock(devlink);
 query_hca_caps_err:
     devl_unregister(devlink);
     devl_unlock(devlink);
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
         goto peer_devlink_set_err;
     }
 
-    devlink_register(devlink);
     return 0;
 
 peer_devlink_set_err:
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
     u16 l3_proto; /* protocol specified in the template */
 };
 
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum {                 /* key / mask */
+    FRAG_NOT   = 0x03, /* 0 / 3 */
+    FRAG_SOME  = 0x11, /* 1 / 1 */
+    FRAG_FIRST = 0x13, /* 1 / 3 */
+    FRAG_LATER = 0x33, /* 3 / 3 */
+    FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = {          /* is_frag */
+    { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+    { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+    { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+    { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST }  /* 1/1 */
+    /*  0/0        0/1         1/0         1/1  <-- first_frag */
+};
+
 static int
 sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
 {
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
     flow_rule_match_control(st->frule, &mt);
 
     if (mt.mask->flags) {
-        if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
-            if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
-                value = 1; /* initial fragment */
-                mask = 0x3;
-            } else {
-                if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-                    value = 3; /* follow up fragment */
-                    mask = 0x3;
-                } else {
-                    value = 0; /* no fragment */
-                    mask = 0x3;
-                }
-            }
-        } else {
-            if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
-                value = 3; /* follow up fragment */
-                mask = 0x3;
-            } else {
-                value = 0; /* no fragment */
-                mask = 0x3;
-            }
-        }
+        u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+        u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+        u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+        u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+        u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+        u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+        /* Lookup verdict based on the 2 + 2 input bits */
+        u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+        if (vdt == FRAG_INVAL) {
+            NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+                       "Match on invalid fragment flag combination");
+            return -EINVAL;
+        }
+
+        /* Extract VCAP fragment key and mask from verdict */
+        value = (vdt >> 4) & 0x3;
+        mask = vdt & 0x3;
 
         err = vcap_rule_add_key_u32(st->vrule,
                         VCAP_KF_L3_FRAGMENT_TYPE,
                         value, mask);
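The table replaces four nested if/else ladders with a 2-bit by 2-bit lookup: the row is (is_frag key << 1) | is_frag mask, the column is the same pair for first_frag, and the verdict byte packs the VCAP key in its high nibble and the mask in its low nibble. For example, is_frag key/mask = 1/1 with first_frag key/mask = 0/0 selects FRAG_SOME (0x11), i.e. value 1, mask 1. A standalone re-check of that mapping:

    #include <stdio.h>

    enum {                 /* key / mask */
        FRAG_NOT   = 0x03, /* 0 / 3 */
        FRAG_SOME  = 0x11, /* 1 / 1 */
        FRAG_FIRST = 0x13, /* 1 / 3 */
        FRAG_LATER = 0x33, /* 3 / 3 */
        FRAG_INVAL = 0xff,
    };

    static const unsigned char frag_map[4][4] = {
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* is_frag 0/0 */
        { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL }, /* is_frag 0/1 */
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* is_frag 1/0 */
        { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST }, /* is_frag 1/1 */
    };

    int main(void)
    {
        /* is_frag key=1, mask=1; first_frag key=0, mask=0 */
        unsigned char vdt = frag_map[(1 << 1) | 1][(0 << 1) | 0];

        printf("verdict 0x%02x -> value %u, mask %u\n",
               vdt, (vdt >> 4) & 0x3, vdt & 0x3);
        return 0;
    }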
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
     dma_addr_t dma_addr;
     int rx_packets = 0;
     u8 desc_status;
-    u16 pkt_len;
+    u16 desc_len;
     u8 die_dt;
     int entry;
     int limit;
     int i;
 
-    entry = priv->cur_rx[q] % priv->num_rx_ring[q];
     limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
     stats = &priv->stats[q];
 
-    desc = &priv->rx_ring[q].desc[entry];
-    for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+    for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+        entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+        desc = &priv->rx_ring[q].desc[entry];
+        if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+            break;
+
         /* Descriptor type must be checked before all other reads */
         dma_rmb();
         desc_status = desc->msc;
-        pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+        desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
         /* We use 0-byte descriptors to mark the DMA mapping errors */
-        if (!pkt_len)
+        if (!desc_len)
             continue;
 
         if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
         switch (die_dt) {
         case DT_FSINGLE:
             skb = ravb_get_skb_gbeth(ndev, entry, desc);
-            skb_put(skb, pkt_len);
+            skb_put(skb, desc_len);
             skb->protocol = eth_type_trans(skb, ndev);
             if (ndev->features & NETIF_F_RXCSUM)
                 ravb_rx_csum_gbeth(skb);
             napi_gro_receive(&priv->napi[q], skb);
             rx_packets++;
-            stats->rx_bytes += pkt_len;
+            stats->rx_bytes += desc_len;
             break;
         case DT_FSTART:
             priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
-            skb_put(priv->rx_1st_skb, pkt_len);
+            skb_put(priv->rx_1st_skb, desc_len);
             break;
         case DT_FMID:
             skb = ravb_get_skb_gbeth(ndev, entry, desc);
             skb_copy_to_linear_data_offset(priv->rx_1st_skb,
                                priv->rx_1st_skb->len,
                                skb->data,
-                               pkt_len);
-            skb_put(priv->rx_1st_skb, pkt_len);
+                               desc_len);
+            skb_put(priv->rx_1st_skb, desc_len);
             dev_kfree_skb(skb);
             break;
         case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
             skb_copy_to_linear_data_offset(priv->rx_1st_skb,
                                priv->rx_1st_skb->len,
                                skb->data,
-                               pkt_len);
-            skb_put(priv->rx_1st_skb, pkt_len);
+                               desc_len);
+            skb_put(priv->rx_1st_skb, desc_len);
             dev_kfree_skb(skb);
             priv->rx_1st_skb->protocol =
                 eth_type_trans(priv->rx_1st_skb, ndev);
             if (ndev->features & NETIF_F_RXCSUM)
-                ravb_rx_csum_gbeth(skb);
+                ravb_rx_csum_gbeth(priv->rx_1st_skb);
+            stats->rx_bytes += priv->rx_1st_skb->len;
             napi_gro_receive(&priv->napi[q],
                      priv->rx_1st_skb);
             rx_packets++;
-            stats->rx_bytes += pkt_len;
             break;
         }
-
-        entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-        desc = &priv->rx_ring[q].desc[entry];
     }
 
     /* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 {
     struct ravb_private *priv = netdev_priv(ndev);
     const struct ravb_hw_info *info = priv->info;
-    int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
-    int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
-            priv->cur_rx[q];
     struct net_device_stats *stats = &priv->stats[q];
     struct ravb_ex_rx_desc *desc;
+    unsigned int limit, i;
     struct sk_buff *skb;
     dma_addr_t dma_addr;
     struct timespec64 ts;
+    int rx_packets = 0;
     u8 desc_status;
     u16 pkt_len;
-    int limit;
+    int entry;
 
-    boguscnt = min(boguscnt, *quota);
-    limit = boguscnt;
-    desc = &priv->rx_ring[q].ex_desc[entry];
-    while (desc->die_dt != DT_FEMPTY) {
+    limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+    for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+        entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+        desc = &priv->rx_ring[q].ex_desc[entry];
+        if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+            break;
+
         /* Descriptor type must be checked before all other reads */
         dma_rmb();
         desc_status = desc->msc;
         pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
-        if (--boguscnt < 0)
-            break;
-
         /* We use 0-byte descriptors to mark the DMA mapping errors */
         if (!pkt_len)
             continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
         if (ndev->features & NETIF_F_RXCSUM)
             ravb_rx_csum(skb);
         napi_gro_receive(&priv->napi[q], skb);
-        stats->rx_packets++;
+        rx_packets++;
         stats->rx_bytes += pkt_len;
-
-        entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-        desc = &priv->rx_ring[q].ex_desc[entry];
     }
 
     /* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
         desc->die_dt = DT_FEMPTY;
     }
 
-    *quota -= limit - (++boguscnt);
-
-    return boguscnt <= 0;
+    stats->rx_packets += rx_packets;
+    *quota -= rx_packets;
+    return *quota == 0;
 }
 
 /* Packet receive function for Ethernet AVB */
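The common thread of the ravb RX rewrites: the poll loop now charges the NAPI budget per *delivered packet*, not per *visited descriptor*, so zero-length descriptors (the driver's marker for DMA mapping errors) and multi-descriptor jumbo frames no longer consume quota they didn't earn. A runnable model of that accounting, with a plain int array standing in for the descriptor ring:

    #include <stdio.h>

    /* Toy ring: 0 marks a descriptor poisoned by a DMA mapping error. */
    static int ring[] = { 1500, 0, 1500, 0, 0, 1500, 1500 };

    static int rx_poll(int *quota)
    {
        int rx_packets = 0;
        unsigned i;

        for (i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
            if (rx_packets == *quota)
                break;
            if (!ring[i])       /* skip the bad slot, keep scanning */
                continue;
            rx_packets++;       /* deliver */
        }
        *quota -= rx_packets;   /* charge packets, not descriptors */
        return *quota == 0;
    }

    int main(void)
    {
        int quota = 4;

        printf("reschedule: %d, quota left: %d\n", rx_poll(&quota), quota);
        return 0;
    }

Under the old boguscnt scheme, the three bad descriptors above would have eaten budget and could stall the loop before real packets were processed.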
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
 extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
 
 struct mac_link {
+    u32 caps;
     u32 speed_mask;
     u32 speed10;
     u32 speed100;
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 
     priv->dev->priv_flags |= IFF_UNICAST_FLT;
 
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_10 | MAC_100 | MAC_1000;
     /* The loopback bit seems to be re-set when link change
      * Simply mask it each time
      * Speed 10/100/1000 are set in BIT(2)/BIT(3)
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
     if (mac->multicast_filter_bins)
         mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_10 | MAC_100 | MAC_1000;
     mac->link.duplex = GMAC_CONTROL_DM;
     mac->link.speed10 = GMAC_CONTROL_PS;
     mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
     dev_info(priv->device, "\tDWMAC100\n");
 
     mac->pcsr = priv->ioaddr;
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_10 | MAC_100;
     mac->link.duplex = MAC_CONTROL_F;
     mac->link.speed10 = 0;
     mac->link.speed100 = 0;
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
 
 static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
 {
-    priv->phylink_config.mac_capabilities |= MAC_2500FD;
+    if (priv->plat->tx_queues_to_use > 1)
+        priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+    else
+        priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
 }
 
 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -1378,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
     if (mac->multicast_filter_bins)
         mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
     mac->link.duplex = GMAC_CONFIG_DM;
     mac->link.speed10 = GMAC_CONFIG_PS;
     mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
     writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
 }
 
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
-    priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
-                         MAC_10000FD | MAC_25000FD |
-                         MAC_40000FD | MAC_50000FD |
-                         MAC_100000FD;
-}
-
 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
 {
     u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1540,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
 
 const struct stmmac_ops dwxgmac210_ops = {
     .core_init = dwxgmac2_core_init,
-    .phylink_get_caps = xgmac_phylink_get_caps,
     .set_mac = dwxgmac2_set_mac,
     .rx_ipc = dwxgmac2_rx_ipc,
     .rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1601,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
 
 const struct stmmac_ops dwxlgmac2_ops = {
     .core_init = dwxgmac2_core_init,
-    .phylink_get_caps = xgmac_phylink_get_caps,
    .set_mac = dwxgmac2_set_mac,
     .rx_ipc = dwxgmac2_rx_ipc,
     .rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1661,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
     if (mac->multicast_filter_bins)
         mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_1000FD | MAC_2500FD | MAC_5000FD |
+             MAC_10000FD;
     mac->link.duplex = 0;
     mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
     mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1698,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
     if (mac->multicast_filter_bins)
         mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
 
+    mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+             MAC_1000FD | MAC_2500FD | MAC_5000FD |
+             MAC_10000FD | MAC_25000FD |
+             MAC_40000FD | MAC_50000FD |
+             MAC_100000FD;
     mac->link.duplex = 0;
     mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
     mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
     return ret;
 }
 
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
-    /* Half-Duplex can only work with single tx queue */
-    if (priv->plat->tx_queues_to_use > 1)
-        priv->phylink_config.mac_capabilities &=
-            ~(MAC_10HD | MAC_100HD | MAC_1000HD);
-    else
-        priv->phylink_config.mac_capabilities |=
-            (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
 static int stmmac_phy_setup(struct stmmac_priv *priv)
 {
     struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
         xpcs_get_interfaces(priv->hw->xpcs,
                     priv->phylink_config.supported_interfaces);
 
-    priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
-                        MAC_10FD | MAC_100FD |
-                        MAC_1000FD;
-
-    stmmac_set_half_duplex(priv);
-
     /* Get the MAC specific capabilities */
     stmmac_mac_phylink_get_caps(priv);
 
+    priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
     max_speed = priv->plat->max_speed;
     if (max_speed)
         phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -7342,6 +7327,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 {
     struct stmmac_priv *priv = netdev_priv(dev);
     int ret = 0, i;
+    int max_speed;
 
     if (netif_running(dev))
         stmmac_release(dev);
@@ -7355,7 +7341,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
             priv->rss.table[i] = ethtool_rxfh_indir_default(i,
                                     rx_cnt);
 
-    stmmac_set_half_duplex(priv);
+    stmmac_mac_phylink_get_caps(priv);
+
+    priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+    max_speed = priv->plat->max_speed;
+    if (max_speed)
+        phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
     stmmac_napi_add(dev);
 
     if (netif_running(dev))
@@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
 
 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 {
+    struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+    struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
     struct device *dev = common->dev;
     struct am65_cpsw_port *port;
     int ret = 0, i;
@@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
     if (ret)
         return ret;
 
+    /* The DMA Channels are not guaranteed to be in a clean state.
+     * Reset and disable them to ensure that they are back to the
+     * clean state and ready to be used.
+     */
+    for (i = 0; i < common->tx_ch_num; i++) {
+        k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+                      am65_cpsw_nuss_tx_cleanup);
+        k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+    }
+
+    for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+        k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+                      am65_cpsw_nuss_rx_cleanup, !!i);
+
+    k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
     ret = am65_cpsw_nuss_register_devlink(common);
     if (ret)
         return ret;
@@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                         tun_is_little_endian(tun), true,
                         vlan_hlen)) {
             struct skb_shared_info *sinfo = skb_shinfo(skb);
-            pr_err("unexpected GSO type: "
-                   "0x%x, gso_size %d, hdr_len %d\n",
-                   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
-                   tun16_to_cpu(tun, gso.hdr_len));
-            print_hex_dump(KERN_ERR, "tun: ",
-                       DUMP_PREFIX_NONE,
-                       16, 1, skb->head,
-                       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+            if (net_ratelimit()) {
+                netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+                       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+                       tun16_to_cpu(tun, gso.hdr_len));
+                print_hex_dump(KERN_ERR, "tun: ",
+                           DUMP_PREFIX_NONE,
+                           16, 1, skb->head,
+                           min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+            }
             WARN_ON_ONCE(1);
             return -EINVAL;
         }
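The pattern behind the tun fix: wrap expensive diagnostics in a rate-limit check so a peer flooding illegal packets cannot also flood the kernel log. A user-space sketch of a window-based limiter; the kernel's net_ratelimit() is more elaborate (jiffies-based, with a suppressed-message count), so this is only the shape:

    #include <stdio.h>
    #include <time.h>

    /* Allow at most `burst` messages per one-second window. */
    static int ratelimit(void)
    {
        static time_t window;
        static int used;
        const int burst = 10;
        time_t now = time(NULL);

        if (now != window) {
            window = now;
            used = 0;
        }
        return used++ < burst;
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++)
            if (ratelimit())
                fprintf(stderr, "unexpected GSO type ...\n");
        return 0;   /* only ~10 lines reach the log */
    }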
@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
 
     netif_set_tso_max_size(dev->net, 16384);
 
+    ax88179_reset(dev);
+
     return 0;
 }
 
@@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
     .unbind = ax88179_unbind,
     .status = ax88179_status,
     .link_reset = ax88179_link_reset,
-    .reset = ax88179_reset,
     .stop = ax88179_stop,
     .flags = FLAG_ETHER | FLAG_FRAMING_AX,
     .rx_fixup = ax88179_rx_fixup,
@@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
     .unbind = ax88179_unbind,
     .status = ax88179_status,
     .link_reset = ax88179_link_reset,
-    .reset = ax88179_reset,
     .stop = ax88179_stop,
     .flags = FLAG_ETHER | FLAG_FRAMING_AX,
     .rx_fixup = ax88179_rx_fixup,
@@ -1431,6 +1431,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
     {QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
     {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+    {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
 
     /* 4. Gobi 1000 devices */
     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -292,13 +292,16 @@ out:
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
     clear_bit(dmb->sba_idx, ism->sba_bitmap);
-    dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
-              dmb->cpu_addr, dmb->dma_addr);
+    dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+               DMA_FROM_DEVICE);
+    folio_put(virt_to_folio(dmb->cpu_addr));
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
+    struct folio *folio;
     unsigned long bit;
+    int rc;
 
     if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
         return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
         test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
         return -EINVAL;
 
-    dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
-                       &dmb->dma_addr,
-                       GFP_KERNEL | __GFP_NOWARN |
-                       __GFP_NOMEMALLOC | __GFP_NORETRY);
-    if (!dmb->cpu_addr)
-        clear_bit(dmb->sba_idx, ism->sba_bitmap);
+    folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+                __GFP_NORETRY, get_order(dmb->dmb_len));
+    if (!folio) {
+        rc = -ENOMEM;
+        goto out_bit;
+    }
 
-    return dmb->cpu_addr ? 0 : -ENOMEM;
+    dmb->cpu_addr = folio_address(folio);
+    dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+                     virt_to_page(dmb->cpu_addr), 0,
+                     dmb->dmb_len, DMA_FROM_DEVICE);
+    if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+        rc = -ENOMEM;
+        goto out_free;
+    }
+
+    return 0;
+
+out_free:
+    kfree(dmb->cpu_addr);
+out_bit:
+    clear_bit(dmb->sba_idx, ism->sba_bitmap);
+    return rc;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
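The shape of the ism allocation switch: the receive buffer no longer comes from dma_alloc_coherent(); it is an ordinary folio that is then streaming-mapped with dma_map_page(), which is why teardown must unmap before releasing the memory. A user-space model of that acquire/release ordering, with malloc/free standing in for folio_alloc/folio_put and a flag standing in for the DMA mapping:

    #include <stdio.h>
    #include <stdlib.h>

    struct dmb { void *cpu_addr; int mapped; size_t len; };

    static int alloc_dmb(struct dmb *d, size_t len)
    {
        d->cpu_addr = malloc(len);       /* models folio_alloc() */
        if (!d->cpu_addr)
            return -1;
        d->mapped = 1;                   /* models dma_map_page() succeeding */
        d->len = len;
        return 0;
    }

    static void free_dmb(struct dmb *d)
    {
        d->mapped = 0;                   /* dma_unmap_page() first ... */
        free(d->cpu_addr);               /* ... then release the memory */
    }

    int main(void)
    {
        struct dmb d;

        if (!alloc_dmb(&d, 16384))
            free_dmb(&d);
        printf("ok\n");
        return 0;
    }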
@@ -108,7 +108,7 @@ struct udp_sock {
 #define udp_assign_bit(nr, sk, val)     \
     assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
 
-#define UDP_MAX_SEGMENTS    (1 << 6UL)
+#define UDP_MAX_SEGMENTS    (1 << 7UL)
 
 #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
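Why doubling UDP_MAX_SEGMENTS from 64 to 128 matters: a Windows virtio peer can offload a near-64 KiB UDP send with a small MSS, which splits into more than 64 segments and was previously rejected. A quick arithmetic check, with an illustrative payload and MSS:

    #include <stdio.h>

    #define UDP_MAX_SEGMENTS (1 << 7UL)    /* 128 after the fix; was 64 */

    int main(void)
    {
        unsigned payload = 65000, mss = 536;    /* small-MSS peer (example values) */
        unsigned segs = (payload + mss - 1) / mss;

        printf("%u segments, %s\n", segs,
               segs <= UDP_MAX_SEGMENTS ? "accepted" : "rejected (EINVAL)");
        return 0;
    }

With these numbers the send splits into 122 segments: over the old 64-segment cap, within the new 128.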
@@ -336,7 +336,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
 int nf_flow_table_offload_init(void);
 void nf_flow_table_offload_exit(void);
 
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
 {
     __be16 proto;
 
@@ -352,6 +352,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
     return 0;
 }
 
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+    if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+        return false;
+
+    *inner_proto = __nf_flow_pppoe_proto(skb);
+
+    return true;
+}
+
 #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
 #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
 #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)   \
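The split helper enforces "validate before parse": callers must confirm the PPPoE session header is actually present in the linear data (pskb_may_pull) before the raw reader touches it, switching from `proto = nf_flow_pppoe_proto(skb)` to `if (!nf_flow_pppoe_proto(skb, &proto)) return NF_ACCEPT;`. A runnable user-space model of the same guard against reading a truncated header:

    #include <stdio.h>
    #include <string.h>

    #define PPPOE_SES_HLEN 8

    struct pkt { const unsigned char *data; unsigned len; };

    /* Report success only when the full header is really there;
     * the length check models pskb_may_pull() failing on a runt.
     */
    static int pppoe_proto(const struct pkt *p, unsigned short *inner)
    {
        if (p->len < PPPOE_SES_HLEN)
            return 0;
        memcpy(inner, p->data + 6, sizeof(*inner)); /* protocol at offset 6 */
        return 1;
    }

    int main(void)
    {
        unsigned char hdr[8] = { 0x11, 0, 0, 1, 0, 0, 0x00, 0x21 };
        struct pkt good = { hdr, sizeof(hdr) }, runt = { hdr, 4 };
        unsigned short proto;

        printf("good: %d\n", pppoe_proto(&good, &proto));
        printf("runt: %d (accept packet unparsed)\n", pppoe_proto(&runt, &proto));
        return 0;
    }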
@@ -307,9 +307,23 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
     return (void *)priv;
 }
 
+
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+    NFT_ITER_UNSPEC,
+    NFT_ITER_READ,
+    NFT_ITER_UPDATE,
+};
+
 struct nft_set;
 struct nft_set_iter {
     u8          genmask;
+    enum nft_iter_type  type:8;
     unsigned int        count;
     unsigned int        skip;
     int         err;
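Why the iterator grew a type: read-side walks (dumps) must see only elements active in the *current* generation, while update walks under the mutex operate on *next*-generation state; the active check therefore moves out of the set backends and into each callback, which picks the right genmask via the iterator it is handed. A condensed model of the two-generation visibility test, assuming nftables' convention that a set bit in the element's genmask means "inactive in that generation":

    #include <stdio.h>

    static int elem_active(unsigned char elem_genmask, unsigned char genmask)
    {
        return !(elem_genmask & genmask);   /* inverted test, as in nftables */
    }

    int main(void)
    {
        unsigned char cur = 0x1, next = 0x2;
        unsigned char newly_added = 0x1;    /* inactive now, active next gen */

        printf("READ walk (cur gen) sees it:    %d\n", elem_active(newly_added, cur));
        printf("UPDATE walk (next gen) sees it: %d\n", elem_active(newly_added, next));
        return 0;
    }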
@@ -117,6 +117,7 @@ struct Qdisc {
     struct qdisc_skb_head   q;
     struct gnet_stats_basic_sync bstats;
     struct gnet_stats_queue qstats;
+    int         owner;
     unsigned long       state;
     unsigned long       state2; /* must be written under qdisc spinlock */
     struct Qdisc        *next_sched;
@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
     return netif_receive_skb(skb);
 }
 
-static int br_pass_frame_up(struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
 {
     struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
     struct net_bridge *br = netdev_priv(brdev);
@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
     br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
                BR_MCAST_DIR_TX);
 
+    BR_INPUT_SKB_CB(skb)->promisc = promisc;
+
     return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                dev_net(indev), NULL, skb, indev, NULL,
                br_netif_receive_skb);
@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
     struct net_bridge_mcast *brmctx;
     struct net_bridge_vlan *vlan;
     struct net_bridge *br;
+    bool promisc;
     u16 vid = 0;
     u8 state;
 
@@ -137,7 +140,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
     if (p->flags & BR_LEARNING)
         br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
 
-    local_rcv = !!(br->dev->flags & IFF_PROMISC);
+    promisc = !!(br->dev->flags & IFF_PROMISC);
+    local_rcv = promisc;
+
     if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
         /* by definition the broadcast is also a multicast address */
         if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -200,7 +205,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
             unsigned long now = jiffies;
 
             if (test_bit(BR_FDB_LOCAL, &dst->flags))
-                return br_pass_frame_up(skb);
+                return br_pass_frame_up(skb, false);
 
             if (now != dst->used)
                 dst->used = now;
@@ -213,7 +218,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
     }
 
     if (local_rcv)
-        return br_pass_frame_up(skb);
+        return br_pass_frame_up(skb, promisc);
 
 out:
     return 0;
@@ -386,6 +391,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
             goto forward;
     }
 
+    BR_INPUT_SKB_CB(skb)->promisc = false;
+
     /* The else clause should be hit when nf_hook():
      *   - returns < 0 (drop/error)
      *   - returns = 0 (stolen/nf_queue)
@@ -600,11 +600,17 @@ static unsigned int br_nf_local_in(void *priv,
                    struct sk_buff *skb,
                    const struct nf_hook_state *state)
 {
+    bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
     struct nf_conntrack *nfct = skb_nfct(skb);
     const struct nf_ct_hook *ct_hook;
     struct nf_conn *ct;
     int ret;
 
+    if (promisc) {
+        nf_reset_ct(skb);
+        return NF_ACCEPT;
+    }
+
     if (!nfct || skb->pkt_type == PACKET_HOST)
         return NF_ACCEPT;
 
@@ -589,6 +589,7 @@ struct br_input_skb_cb {
 #endif
     u8 proxyarp_replied:1;
     u8 src_port_isolated:1;
+    u8 promisc:1;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
     u8 vlan_filtered:1;
 #endif
@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
 static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
                     const struct nf_hook_state *state)
 {
-    enum ip_conntrack_info ctinfo;
+    bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+    struct nf_conntrack *nfct = skb_nfct(skb);
     struct nf_conn *ct;
 
-    if (skb->pkt_type == PACKET_HOST)
+    if (promisc) {
+        nf_reset_ct(skb);
+        return NF_ACCEPT;
+    }
+
+    if (!nfct || skb->pkt_type == PACKET_HOST)
         return NF_ACCEPT;
 
     /* nf_conntrack_confirm() cannot handle concurrent clones,
      * this happens for broad/multicast frames with e.g. macvlan on top
      * of the bridge device.
      */
-    ct = nf_ct_get(skb, &ctinfo);
-    if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+    ct = container_of(nfct, struct nf_conn, ct_general);
+    if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
         return NF_ACCEPT;
 
     /* let inet prerouting call conntrack again */
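Both bridge-side input hooks now share the same early exit: a frame that is only visible because the bridge is promiscuous gets its conntrack reference dropped and is accepted, instead of being confirmed as a connection that inet prerouting never tracked. The control flow, reduced to a runnable sketch with stand-in types:

    #include <stdio.h>

    struct skb { int promisc; void *nfct; int pkt_type; };

    #define PACKET_HOST 0
    #define NF_ACCEPT 1

    static int bridge_in(struct skb *skb)
    {
        if (skb->promisc) {
            skb->nfct = NULL;    /* models nf_reset_ct() */
            return NF_ACCEPT;
        }
        if (!skb->nfct || skb->pkt_type == PACKET_HOST)
            return NF_ACCEPT;
        /* ... otherwise the real hook confirms the conntrack entry ... */
        return NF_ACCEPT;
    }

    int main(void)
    {
        struct skb s = { .promisc = 1, .nfct = (void *)1, .pkt_type = 3 };

        bridge_in(&s);
        printf("nfct after promisc path: %p\n", s.nfct);
        return 0;
    }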
@@ -3775,6 +3775,10 @@ no_lock_out:
         return rc;
     }
 
+    if (unlikely(READ_ONCE(q->owner) == smp_processor_id())) {
+        kfree_skb_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
+        return NET_XMIT_DROP;
+    }
     /*
      * Heuristic to force contended enqueues to serialize on a
      * separate lock before trying to get qdisc main lock.
@@ -3814,7 +3818,9 @@ no_lock_out:
             qdisc_run_end(q);
             rc = NET_XMIT_SUCCESS;
         } else {
+            WRITE_ONCE(q->owner, smp_processor_id());
             rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
+            WRITE_ONCE(q->owner, -1);
             if (qdisc_run_begin(q)) {
                 if (unlikely(contended)) {
                     spin_unlock(&q->busylock);
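The mirred deadlock this guards against: a tc mirred action can redirect a packet back into the same device, re-entering the same qdisc from the same CPU while its lock is effectively held. Recording the enqueuing CPU in q->owner lets the nested attempt be detected and dropped instead of spinning forever. A standalone model of the recursion guard (plain ints in place of the qdisc and per-CPU machinery):

    #include <stdio.h>

    static int owner = -1;    /* models q->owner; -1 means unowned */

    static int enqueue(int cpu, int depth)
    {
        if (owner == cpu) {
            printf("cpu%d: recursion detected, drop\n", cpu);
            return -1;        /* models NET_XMIT_DROP */
        }
        owner = cpu;
        if (depth)            /* models mirred redirecting to the same device */
            enqueue(cpu, depth - 1);
        owner = -1;
        return 0;
    }

    int main(void)
    {
        enqueue(0, 1);        /* the nested enqueue on the same CPU is dropped */
        return 0;
    }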
@@ -1050,6 +1050,11 @@ next:
             e++;
         }
     }
 
+    /* Don't let NLM_DONE coalesce into a message, even if it could.
+     * Some user space expects NLM_DONE in a separate recv().
+     */
+    err = skb->len;
+
 out:
     cb->args[1] = e;
@@ -21,7 +21,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
         proto = veth->h_vlan_encapsulated_proto;
         break;
     case htons(ETH_P_PPP_SES):
-        proto = nf_flow_pppoe_proto(skb);
+        if (!nf_flow_pppoe_proto(skb, &proto))
+            return NF_ACCEPT;
         break;
     default:
         proto = skb->protocol;
@@ -157,7 +157,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
         tuple->encap[i].proto = skb->protocol;
         break;
     case htons(ETH_P_PPP_SES):
-        phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+        phdr = (struct pppoe_hdr *)skb_network_header(skb);
         tuple->encap[i].id = ntohs(phdr->sid);
         tuple->encap[i].proto = skb->protocol;
         break;
@@ -273,10 +273,11 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
     return NF_STOLEN;
 }
 
-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
                        u32 *offset)
 {
     struct vlan_ethhdr *veth;
+    __be16 inner_proto;
 
     switch (skb->protocol) {
     case htons(ETH_P_8021Q):
@@ -287,7 +288,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
         }
         break;
     case htons(ETH_P_PPP_SES):
-        if (nf_flow_pppoe_proto(skb) == proto) {
+        if (nf_flow_pppoe_proto(skb, &inner_proto) &&
+            inner_proto == proto) {
             *offset += PPPOE_SES_HLEN;
             return true;
         }
@@ -316,7 +318,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
             skb_reset_network_header(skb);
             break;
         case htons(ETH_P_PPP_SES):
-            skb->protocol = nf_flow_pppoe_proto(skb);
+            skb->protocol = __nf_flow_pppoe_proto(skb);
             skb_pull(skb, PPPOE_SES_HLEN);
             skb_reset_network_header(skb);
             break;
@ -594,6 +594,12 @@ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
|
||||
const struct nft_set_iter *iter,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
{
|
||||
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
|
||||
if (!nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
nft_set_elem_change_active(ctx->net, set, ext);
|
||||
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
|
||||
|
||||
return 0;
|
||||
@ -617,6 +623,7 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
|
||||
if (!nft_set_elem_active(ext, genmask))
|
||||
continue;
|
||||
|
||||
nft_set_elem_change_active(ctx->net, set, ext);
|
||||
nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
|
||||
break;
|
||||
}
|
||||
@ -626,6 +633,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
|
||||
{
|
||||
struct nft_set_iter iter = {
|
||||
.genmask = nft_genmask_next(ctx->net),
|
||||
.type = NFT_ITER_UPDATE,
|
||||
.fn = nft_mapelem_deactivate,
|
||||
};
|
||||
|
||||
@ -3060,7 +3068,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
|
||||
{
|
||||
const struct nft_expr_type *type, *candidate = NULL;
|
||||
|
||||
list_for_each_entry(type, &nf_tables_expressions, list) {
|
||||
list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
|
||||
if (!nla_strcmp(nla, type->name)) {
|
||||
if (!type->family && !candidate)
|
||||
candidate = type;
|
||||
@ -3092,9 +3100,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
|
||||
if (nla == NULL)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
rcu_read_lock();
|
||||
type = __nft_expr_type_get(family, nla);
|
||||
if (type != NULL && try_module_get(type->owner))
|
||||
if (type != NULL && try_module_get(type->owner)) {
|
||||
rcu_read_unlock();
|
||||
return type;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
lockdep_nfnl_nft_mutex_not_held();
|
||||
#ifdef CONFIG_MODULES
|
||||
@ -3875,6 +3887,9 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
|
||||
const struct nft_data *data;
|
||||
int err;
|
||||
|
||||
if (!nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
|
||||
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
|
||||
return 0;
|
||||
@ -3898,17 +3913,20 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
|
||||
|
||||
int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
|
||||
{
|
||||
u8 genmask = nft_genmask_next(ctx->net);
|
||||
struct nft_set_iter dummy_iter = {
|
||||
.genmask = nft_genmask_next(ctx->net),
|
||||
};
|
||||
struct nft_set_elem_catchall *catchall;
|
||||
|
||||
struct nft_set_ext *ext;
|
||||
int ret = 0;
|
||||
|
||||
list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
|
||||
ext = nft_set_elem_ext(set, catchall->elem);
|
||||
if (!nft_set_elem_active(ext, genmask))
|
||||
if (!nft_set_elem_active(ext, dummy_iter.genmask))
|
||||
continue;
|
||||
|
||||
ret = nft_setelem_validate(ctx, set, NULL, catchall->elem);
|
||||
ret = nft_setelem_validate(ctx, set, &dummy_iter, catchall->elem);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
@ -5397,6 +5415,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
|
||||
const struct nft_set_iter *iter,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
{
|
||||
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
|
||||
if (!nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
return nft_setelem_data_validate(ctx, set, elem_priv);
|
||||
}
|
||||
|
||||
@ -5441,6 +5464,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
|
||||
}
|
||||
|
||||
iter.genmask = nft_genmask_next(ctx->net);
|
||||
iter.type = NFT_ITER_UPDATE;
|
||||
iter.skip = 0;
|
||||
iter.count = 0;
|
||||
iter.err = 0;
|
||||
@ -5488,6 +5512,13 @@ static int nft_mapelem_activate(const struct nft_ctx *ctx,
|
||||
const struct nft_set_iter *iter,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
{
|
||||
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
|
||||
/* called from abort path, reverse check to undo changes. */
|
||||
if (nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
nft_clear(ctx->net, ext);
|
||||
nft_setelem_data_activate(ctx->net, set, elem_priv);
|
||||
|
||||
return 0;
|
||||
@ -5505,6 +5536,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
|
||||
if (!nft_set_elem_active(ext, genmask))
|
||||
continue;
|
||||
|
||||
nft_clear(ctx->net, ext);
|
||||
nft_setelem_data_activate(ctx->net, set, catchall->elem);
|
||||
break;
|
||||
}
|
||||
@ -5514,6 +5546,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
|
||||
{
|
||||
struct nft_set_iter iter = {
|
||||
.genmask = nft_genmask_next(ctx->net),
|
||||
.type = NFT_ITER_UPDATE,
|
||||
.fn = nft_mapelem_activate,
|
||||
};
|
||||
|
||||
@ -5778,6 +5811,9 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
|
||||
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
struct nft_set_dump_args *args;
|
||||
|
||||
if (!nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
|
||||
return 0;
|
||||
|
||||
@ -5888,6 +5924,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
args.skb = skb;
|
||||
args.reset = dump_ctx->reset;
|
||||
args.iter.genmask = nft_genmask_cur(net);
|
||||
args.iter.type = NFT_ITER_READ;
|
||||
args.iter.skip = cb->args[0];
|
||||
args.iter.count = 0;
|
||||
args.iter.err = 0;
|
||||
@ -6627,7 +6664,7 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
|
||||
struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
|
||||
if (nft_setelem_is_catchall(set, elem_priv)) {
|
||||
nft_set_elem_change_active(net, set, ext);
|
||||
nft_clear(net, ext);
|
||||
} else {
|
||||
set->ops->activate(net, set, elem_priv);
|
||||
}
|
||||
@ -7186,6 +7223,16 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
|
||||
}
|
||||
}
|
||||
|
||||
static int nft_setelem_active_next(const struct net *net,
|
||||
const struct nft_set *set,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
{
|
||||
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
|
||||
return nft_set_elem_active(ext, genmask);
|
||||
}
|
||||
|
||||
static void nft_setelem_data_activate(const struct net *net,
|
||||
const struct nft_set *set,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
@ -7309,8 +7356,12 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
|
||||
const struct nft_set_iter *iter,
|
||||
struct nft_elem_priv *elem_priv)
|
||||
{
|
||||
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
|
||||
struct nft_trans *trans;
|
||||
|
||||
if (!nft_set_elem_active(ext, iter->genmask))
|
||||
return 0;
|
||||
|
||||
trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
|
||||
sizeof(struct nft_trans_elem), GFP_ATOMIC);
|
||||
if (!trans)
|
||||
@ -7372,6 +7423,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
|
||||
{
|
||||
struct nft_set_iter iter = {
|
||||
.genmask = genmask,
|
||||
.type = NFT_ITER_UPDATE,
|
||||
.fn = nft_setelem_flush,
|
||||
};
|
||||
|
||||
@@ -7607,7 +7659,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
 {
 	const struct nft_object_type *type;
 
-	list_for_each_entry(type, &nf_tables_objects, list) {
+	list_for_each_entry_rcu(type, &nf_tables_objects, list) {
 		if (type->family != NFPROTO_UNSPEC &&
 		    type->family != family)
 			continue;
@@ -7623,9 +7675,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
 {
 	const struct nft_object_type *type;
 
+	rcu_read_lock();
 	type = __nft_obj_type_get(objtype, family);
-	if (type != NULL && try_module_get(type->owner))
+	if (type != NULL && try_module_get(type->owner)) {
+		rcu_read_unlock();
 		return type;
+	}
+	rcu_read_unlock();
 
 	lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
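These two hunks close a race in object-type lookup: the list walk was not RCU-protected, and the module reference was taken only after the lookup, leaving a window in which the type could be unregistered and freed in between. The rule the fix enforces is "take your reference before leaving the critical section that validated the pointer". A userspace sketch of that rule, with a plain mutex standing in for rcu_read_lock() and all names invented; compile with cc -pthread.

#include <pthread.h>
#include <stdio.h>

struct obj_type {
	int family;
	int refcnt;
	struct obj_type *next;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj_type *registry;

static struct obj_type *obj_type_get(int family)
{
	struct obj_type *t;

	pthread_mutex_lock(&registry_lock);
	for (t = registry; t; t = t->next) {
		if (t->family != family)
			continue;
		t->refcnt++;	/* pin it while the pointer is still safe */
		pthread_mutex_unlock(&registry_lock);
		return t;
	}
	pthread_mutex_unlock(&registry_lock);
	/* the racy variant would unlock first and refcount afterwards,
	 * allowing a concurrent unregister+free: use-after-free */
	return NULL;
}

int main(void)
{
	struct obj_type counter = { .family = 1, .refcnt = 0, .next = NULL };

	registry = &counter;
	if (obj_type_get(1))
		printf("got type, refcnt=%d\n", counter.refcnt); /* 1 */
	return 0;
}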
@@ -10598,8 +10654,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 		case NFT_MSG_DESTROYSETELEM:
 			te = (struct nft_trans_elem *)trans->data;
 
-			nft_setelem_data_activate(net, te->set, te->elem_priv);
-			nft_setelem_activate(net, te->set, te->elem_priv);
+			if (!nft_setelem_active_next(net, te->set, te->elem_priv)) {
+				nft_setelem_data_activate(net, te->set, te->elem_priv);
+				nft_setelem_activate(net, te->set, te->elem_priv);
+			}
 			if (!nft_setelem_is_catchall(te->set, te->elem_priv))
 				te->set->ndeact--;
 
@@ -10787,6 +10845,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
 {
 	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
 
+	if (!nft_set_elem_active(ext, iter->genmask))
+		return 0;
+
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
 	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
 		return 0;
@@ -10871,6 +10932,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 			continue;
 
 		iter.genmask = nft_genmask_next(ctx->net);
+		iter.type = NFT_ITER_UPDATE;
 		iter.skip = 0;
 		iter.count = 0;
 		iter.err = 0;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
@@ -216,6 +216,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
 		return 0;
 
 	iter.genmask = nft_genmask_next(ctx->net);
+	iter.type = NFT_ITER_UPDATE;
 	iter.skip = 0;
 	iter.count = 0;
 	iter.err = 0;
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
@@ -172,7 +172,7 @@ static void nft_bitmap_activate(const struct net *net,
 	nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
 	/* Enter 11 state. */
 	priv->bitmap[idx] |= (genmask << off);
-	nft_set_elem_change_active(net, set, &be->ext);
+	nft_clear(net, &be->ext);
 }
 
 static void nft_bitmap_flush(const struct net *net,
@@ -222,8 +222,6 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
 	list_for_each_entry_rcu(be, &priv->list, head) {
 		if (iter->count < iter->skip)
 			goto cont;
-		if (!nft_set_elem_active(&be->ext, iter->genmask))
-			goto cont;
 
 		iter->err = iter->fn(ctx, set, iter, &be->priv);
 
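nft_set_bitmap packs each possible key as two adjacent bits, one per generation, so the "Enter 11 state" comment above means active in both generations. A runnable sketch of that layout; constants and helper names here are illustrative, not the kernel's.

/* Two-bits-per-element bitmap: state 11 = active in both generations,
 * 10/01 = active in only one. Compile with: cc -o bitmap2 bitmap2.c
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_ELEM 2

static void location(uint32_t key, uint32_t *idx, uint32_t *off)
{
	*idx = key / (8 / BITS_PER_ELEM);			/* byte index */
	*off = (key % (8 / BITS_PER_ELEM)) * BITS_PER_ELEM;	/* bit offset */
}

int main(void)
{
	uint8_t bitmap[16] = {0};
	uint32_t idx, off, key = 13;
	uint8_t genmask = 0x3;	/* both generation bits: the "11 state" */

	location(key, &idx, &off);
	bitmap[idx] |= (uint8_t)(genmask << off);
	printf("key %u -> byte %u, offset %u, state %u\n",
	       key, idx, off, (bitmap[idx] >> off) & 0x3);
	return 0;
}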
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
@@ -199,7 +199,7 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
 {
 	struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);
 
-	nft_set_elem_change_active(net, set, &he->ext);
+	nft_clear(net, &he->ext);
 }
 
 static void nft_rhash_flush(const struct net *net,
@@ -286,8 +286,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
 		if (iter->count < iter->skip)
 			goto cont;
-		if (!nft_set_elem_active(&he->ext, iter->genmask))
-			goto cont;
 
 		iter->err = iter->fn(ctx, set, iter, &he->priv);
 		if (iter->err < 0)
@@ -599,7 +597,7 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
 {
 	struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);
 
-	nft_set_elem_change_active(net, set, &he->ext);
+	nft_clear(net, &he->ext);
 }
 
 static void nft_hash_flush(const struct net *net,
@@ -652,8 +650,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	hlist_for_each_entry_rcu(he, &priv->table[i], node) {
 		if (iter->count < iter->skip)
 			goto cont;
-		if (!nft_set_elem_active(&he->ext, iter->genmask))
-			goto cont;
 
 		iter->err = iter->fn(ctx, set, iter, &he->priv);
 		if (iter->err < 0)
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
@@ -1847,7 +1847,7 @@ static void nft_pipapo_activate(const struct net *net,
 {
 	struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);
 
-	nft_set_elem_change_active(net, set, &e->ext);
+	nft_clear(net, &e->ext);
 }
 
 /**
@@ -2077,6 +2077,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
 		rules_fx = rules_f0;
 
 		nft_pipapo_for_each_field(f, i, m) {
+			bool last = i == m->field_count - 1;
+
 			if (!pipapo_match_field(f, start, rules_fx,
 						match_start, match_end))
 				break;
@@ -2089,16 +2091,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
 
 			match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
 			match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
-		}
 
-		if (i == m->field_count) {
-			priv->dirty = true;
-			pipapo_drop(m, rulemap);
-			return;
+			if (last && f->mt[rulemap[i].to].e == e) {
+				priv->dirty = true;
+				pipapo_drop(m, rulemap);
+				return;
+			}
 		}
 
 		first_rule += rules_f0;
 	}
+
+	WARN_ON_ONCE(1); /* elem_priv not found */
 }
 
 /**
@@ -2115,13 +2119,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
			    struct nft_set_iter *iter)
 {
 	struct nft_pipapo *priv = nft_set_priv(set);
-	struct net *net = read_pnet(&set->net);
 	const struct nft_pipapo_match *m;
 	const struct nft_pipapo_field *f;
 	unsigned int i, r;
 
+	WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
+		     iter->type != NFT_ITER_UPDATE);
+
 	rcu_read_lock();
-	if (iter->genmask == nft_genmask_cur(net))
+	if (iter->type == NFT_ITER_READ)
 		m = rcu_dereference(priv->match);
 	else
 		m = priv->clone;
@@ -2143,9 +2149,6 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
 
 			e = f->mt[r].e;
 
-			if (!nft_set_elem_active(&e->ext, iter->genmask))
-				goto cont;
-
 			iter->err = iter->fn(ctx, set, iter, &e->priv);
 			if (iter->err < 0)
 				goto out;
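The nft_pipapo_remove() change tightens removal: matching the key ranges of all fields is not sufficient, because a duplicate element (for instance one queued for deletion in another generation) can own an identical run of rules. The fix additionally requires that the last field's mapping entry points at the element actually being removed, and warns if nothing matched. A toy model of that ownership check; the structures are invented for illustration, not pipapo's real ones.

#include <stdio.h>

struct elem { const char *name; };
struct rule { int key; struct elem *e; };

static void remove_elem(struct rule *rules, int n, int key, struct elem *e)
{
	for (int i = 0; i < n; i++) {
		if (rules[i].key != key)
			continue;	/* key mismatch */
		if (rules[i].e != e)
			continue;	/* duplicate owned by another element */
		printf("dropping rule %d (%s)\n", i, rules[i].e->name);
		rules[i].e = NULL;
		return;
	}
}

int main(void)
{
	struct elem stale = { "old-gen" }, target = { "new-gen" };
	struct rule rules[] = { { 7, &stale }, { 7, &target } }; /* duplicates */

	remove_elem(rules, 2, 7, &target);	/* must drop rule 1, not rule 0 */
	return 0;
}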
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
@@ -532,7 +532,7 @@ static void nft_rbtree_activate(const struct net *net,
 {
 	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
 
-	nft_set_elem_change_active(net, set, &rbe->ext);
+	nft_clear(net, &rbe->ext);
 }
 
 static void nft_rbtree_flush(const struct net *net,
@@ -600,8 +600,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 		if (iter->count < iter->skip)
 			goto cont;
-		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
-			goto cont;
 
 		iter->err = iter->fn(ctx, set, iter, &rbe->priv);
 		if (iter->err < 0) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
@@ -974,6 +974,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
+	sch->owner = -1;
 	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
 	refcount_set(&sch->refcnt, 1);
 
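sch->owner = -1 initializes the recursion guard added by the mirred deadlock fix: the transmit path records which CPU is currently inside the qdisc, so a mirred action that redirects a packet back to the same device on the same CPU is detected and the packet dropped instead of deadlocking on the qdisc lock. A runnable userspace model of the owner-field idea, using a thread id in place of a CPU id; all names are invented. Compile with cc -pthread.

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	pthread_t owner;
	int owner_valid;	/* stands in for the kernel's -1 "no owner" */
};

static int enqueue(struct queue *q, int depth)
{
	if (q->owner_valid && pthread_equal(q->owner, pthread_self())) {
		printf("recursion detected at depth %d, dropping\n", depth);
		return -1;	/* drop instead of self-deadlocking below */
	}
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self();
	q->owner_valid = 1;
	if (depth == 0)		/* simulate mirred redirecting back to us */
		enqueue(q, 1);
	q->owner_valid = 0;
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .owner_valid = 0 };

	return enqueue(&q, 0) ? 1 : 0;
}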
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
@@ -2663,7 +2663,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 			WRITE_ONCE(u->oob_skb, NULL);
 			consume_skb(skb);
 		}
-	} else if (!(flags & MSG_PEEK)) {
+	} else if (flags & MSG_PEEK) {
+		skb = NULL;
+	} else {
 		skb_unlink(skb, &sk->sk_receive_queue);
 		WRITE_ONCE(u->oob_skb, NULL);
 		if (!WARN_ON_ONCE(skb_unref(skb)))
@@ -2741,18 +2743,16 @@ redo:
 		last = skb = skb_peek(&sk->sk_receive_queue);
 		last_len = last ? last->len : 0;
 
+again:
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 		if (skb) {
 			skb = manage_oob(skb, sk, flags, copied);
-			if (!skb) {
+			if (!skb && copied) {
 				unix_state_unlock(sk);
-				if (copied)
-					break;
-				goto redo;
+				break;
 			}
 		}
 #endif
-again:
 		if (skb == NULL) {
 			if (copied >= target)
 				goto unlock;
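These af_unix hunks make MSG_PEEK honor the out-of-band boundary: without MSG_OOB in the flags, a peek must not return the urgent byte. A small standalone demo; on a kernel with the fix (and CONFIG_AF_UNIX_OOB enabled) the peek below returns only "hello", while unfixed kernels could also leak the '!' byte.

/* Compile with: cc -o oobpeek oobpeek.c */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int sk[2];
	char buf[16] = {0};
	ssize_t n;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk) < 0) {
		perror("socketpair");
		return 1;
	}
	send(sk[0], "hello", 5, 0);
	send(sk[0], "!", 1, MSG_OOB);	/* out-of-band byte */

	/* plain peek: must stop short of the OOB byte */
	n = recv(sk[1], buf, sizeof(buf) - 1, MSG_PEEK | MSG_DONTWAIT);
	printf("peeked %zd bytes: \"%s\"\n", n, buf);	/* expect "hello" */
	return 0;
}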
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
@@ -292,15 +292,17 @@ void ksft_test_result_code(int exit_code, const char *test_name,
 	}
 
 	/* Docs seem to call for double space if directive is absent */
-	if (!directive[0] && msg[0])
+	if (!directive[0] && msg)
 		directive = " # ";
 
-	va_start(args, msg);
 	printf("%s %u %s%s", tap_code, ksft_test_num(), test_name, directive);
 	errno = saved_errno;
-	vprintf(msg, args);
+	if (msg) {
+		va_start(args, msg);
+		vprintf(msg, args);
+		va_end(args);
+	}
 	printf("\n");
-	va_end(args);
 }
 
 static inline __noreturn int ksft_exit_pass(void)
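Two things change in ksft_test_result_code() above: msg may now legitimately be NULL, so the va_list is only started when a format is actually present (vprintf(NULL, ...) is undefined behavior), and the directive spacing checks the pointer instead of dereferencing it. The same pattern in self-contained form; the function name here is invented.

/* Compile with: cc -Wformat -o tapmsg tapmsg.c */
#include <stdarg.h>
#include <stdio.h>

static void tap_result(const char *name, const char *msg, ...)
{
	va_list args;

	printf("ok 1 %s%s", name, msg ? " # " : "");
	if (msg) {		/* only touch the varargs when there is a fmt */
		va_start(args, msg);
		vprintf(msg, args);
		va_end(args);
	}
	printf("\n");
}

int main(void)
{
	tap_result("test_a", NULL);		/* ok 1 test_a */
	tap_result("test_b", "%d retries", 3);	/* ok 1 test_b # 3 retries */
	return 0;
}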
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
@@ -1205,7 +1205,7 @@ void __run_test(struct __fixture_metadata *f,
 		diagnostic = "unknown";
 
 	ksft_test_result_code(t->exit_code, test_name,
-			      diagnostic ? "%s" : "", diagnostic);
+			      diagnostic ? "%s" : NULL, diagnostic);
 }
 
 static int test_harness_run(int argc, char **argv)
diff --git a/tools/testing/selftests/net/tcp_ao/lib/netstat.c b/tools/testing/selftests/net/tcp_ao/lib/netstat.c
@@ -86,7 +86,7 @@ static void netstat_read_type(FILE *fnetstat, struct netstat **dest, char *line)
 
 	pos = strchr(line, ' ') + 1;
 
-	if (fscanf(fnetstat, type->header_name) == EOF)
+	if (fscanf(fnetstat, "%[^ :]", type->header_name) == EOF)
 		test_error("fscanf(%s)", type->header_name);
 	if (fread(&tmp, 1, 1, fnetstat) != 1 || tmp != ':')
 		test_error("Unexpected netstat format (%c)", tmp);
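The netstat fix replaces fscanf(fnetstat, type->header_name), which used a data buffer as the format string, with an explicit "%[^ :]" scanset that reads everything up to the first space or colon into the buffer. A quick standalone demo of the scanset:

/* Compile with: cc -o scanset scanset.c (fmemopen is POSIX) */
#include <stdio.h>
#include <string.h>

int main(void)
{
	static char data[] = "TcpExt: SyncookiesSent";
	char header[32] = {0};
	FILE *f = fmemopen(data, strlen(data), "r");

	if (!f || fscanf(f, "%31[^ :]", header) != 1)
		return 1;
	printf("header: %s\n", header);	/* prints "TcpExt" */
	fclose(f);
	return 0;
}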
diff --git a/tools/testing/selftests/net/tcp_ao/lib/setup.c b/tools/testing/selftests/net/tcp_ao/lib/setup.c
@@ -17,37 +17,37 @@ static pthread_mutex_t ksft_print_lock = PTHREAD_MUTEX_INITIALIZER;
 void __test_msg(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_print_msg(buf);
+	ksft_print_msg("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_ok(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_test_result_pass(buf);
+	ksft_test_result_pass("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_fail(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_test_result_fail(buf);
+	ksft_test_result_fail("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_xfail(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_test_result_xfail(buf);
+	ksft_test_result_xfail("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_error(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_test_result_error(buf);
+	ksft_test_result_error("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 void __test_skip(const char *buf)
 {
 	pthread_mutex_lock(&ksft_print_lock);
-	ksft_test_result_skip(buf);
+	ksft_test_result_skip("%s", buf);
 	pthread_mutex_unlock(&ksft_print_lock);
 }
 
diff --git a/tools/testing/selftests/net/tcp_ao/rst.c b/tools/testing/selftests/net/tcp_ao/rst.c
@@ -256,8 +256,6 @@ static int test_wait_fds(int sk[], size_t nr, bool is_writable[],
 
 static void test_client_active_rst(unsigned int port)
 {
-	/* one in queue, another accept()ed */
-	unsigned int wait_for = backlog + 2;
 	int i, sk[3], err;
 	bool is_writable[ARRAY_SIZE(sk)] = {false};
 	unsigned int last = ARRAY_SIZE(sk) - 1;
@@ -275,16 +273,20 @@ static void test_client_active_rst(unsigned int port)
 	for (i = 0; i < last; i++) {
 		err = _test_connect_socket(sk[i], this_ip_dest, port,
					   (i == 0) ? TEST_TIMEOUT_SEC : -1);
-
 		if (err < 0)
 			test_error("failed to connect()");
 	}
 
-	synchronize_threads(); /* 2: connection accept()ed, another queued */
-	err = test_wait_fds(sk, last, is_writable, wait_for, TEST_TIMEOUT_SEC);
+	synchronize_threads(); /* 2: two connections: one accept()ed, another queued */
+	err = test_wait_fds(sk, last, is_writable, last, TEST_TIMEOUT_SEC);
 	if (err < 0)
 		test_error("test_wait_fds(): %d", err);
 
+	/* async connect() with third sk to get into request_sock_queue */
+	err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
+	if (err < 0)
+		test_error("failed to connect()");
+
 	synchronize_threads(); /* 3: close listen socket */
 	if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
 		test_fail("Failed to send data on connected socket");
@@ -292,13 +294,14 @@ static void test_client_active_rst(unsigned int port)
 	test_ok("Verified established tcp connection");
 
 	synchronize_threads(); /* 4: finishing up */
-	err = _test_connect_socket(sk[last], this_ip_dest, port, -1);
-	if (err < 0)
-		test_error("failed to connect()");
 
 	synchronize_threads(); /* 5: closed active sk */
-	err = test_wait_fds(sk, ARRAY_SIZE(sk), NULL,
-			    wait_for, TEST_TIMEOUT_SEC);
+	/*
+	 * Wait for 2 connections: one accepted, another in the accept queue,
+	 * the one in request_sock_queue won't get fully established, so
+	 * doesn't receive an active RST, see inet_csk_listen_stop().
+	 */
+	err = test_wait_fds(sk, last, NULL, last, TEST_TIMEOUT_SEC);
 	if (err < 0)
 		test_error("select(): %d", err);
 
diff --git a/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c b/tools/testing/selftests/net/tcp_ao/setsockopt-closed.c
@@ -21,7 +21,7 @@ static void make_listen(int sk)
 static void test_vefify_ao_info(int sk, struct tcp_ao_info_opt *info,
				const char *tst)
 {
-	struct tcp_ao_info_opt tmp;
+	struct tcp_ao_info_opt tmp = {};
 	socklen_t len = sizeof(tmp);
 
 	if (getsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &tmp, &len))
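The "= {}" initializer matters because getsockopt() may write fewer bytes than sizeof(tmp) (it updates len), and the test later compares whole structures; without the zeroing, the comparison would read indeterminate stack bytes. A generic runnable sketch of that hazard, with the helper and struct invented for illustration:

/* Compile with: cc -o zeroinit zeroinit.c */
#include <stdio.h>
#include <string.h>

struct opt { int a; int b; };

static int pretend_getsockopt(void *buf, unsigned int *len)
{
	*len = sizeof(int);	/* "kernel" filled only the first field */
	memcpy(buf, &(int){42}, sizeof(int));
	return 0;
}

int main(void)
{
	struct opt expected = { 42, 0 };
	struct opt tmp = {};	/* without this, tmp.b would be garbage */
	unsigned int len = sizeof(tmp);

	pretend_getsockopt(&tmp, &len);
	printf("match: %d (len=%u)\n",
	       !memcmp(&tmp, &expected, sizeof(tmp)), len);
	return 0;
}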
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
@@ -34,7 +34,7 @@
 #endif
 
 #ifndef UDP_MAX_SEGMENTS
-#define UDP_MAX_SEGMENTS	(1 << 6UL)
+#define UDP_MAX_SEGMENTS	(1 << 7UL)
 #endif
 
 #define CONST_MTU_TEST	1500
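This selftest define mirrors the kernel-side cap raised from 64 to 128 segments per UDP GSO send ("net: change maximum number of UDP segments to 128", for virtio compatibility with Windows peers). A sketch of what the higher cap permits, one sendto() fanned out into 128 wire datagrams: the socket option values are the upstream ones, but the destination, sizes, and error handling here are arbitrary, and the send only succeeds on a kernel with UDP GSO and the raised limit.

/* Compile with: cc -o udpseg udpseg.c */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_UDP
#define SOL_UDP 17
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int gso_size = 500;
	char payload[500 * 128];	/* 128 segments: EINVAL on a 64-cap kernel */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),	/* discard port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	memset(payload, 'x', sizeof(payload));
	if (setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)))
		perror("UDP_SEGMENT");
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");	/* fails where the old 64-segment cap applies */
	close(fd);
	return 0;
}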