
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Encap offset calculation is incorrect in esp6, from Sabrina Dubroca.

 2) Better parameter validation in pfkey_dump(), from Mark Salyzyn.

 3) Fix several clang issues on powerpc in selftests, from Tanner Love.

 4) cmsghdr_from_user_compat_to_kern() uses the wrong length, from Al
    Viro.

 5) Out of bounds access in mlx5e driver, from Raed Salem.

 6) Fix transfer buffer memleak in lan78xx, from Johan Hovold.

 7) RCU fixups in rhashtable, from Herbert Xu.

 8) Fix ipv6 nexthop refcnt leak, from Xiyu Yang.

 9) vxlan FDB dump must be done under RCU, from Ido Schimmel.

10) Fix use after free in mlxsw, from Ido Schimmel.

11) Fix map leak in HASH_OF_MAPS bpf code, from Andrii Nakryiko.

12) Fix bug in mac80211 Tx ack status reporting, from Vasanthakumar
    Thiagarajan.

13) Fix memory leaks in IPV6_ADDRFORM code, from Cong Wang.

14) Fix bpf program reference count leaks in mlx5 during
    mlx5e_alloc_rq(), from Xin Xiong.
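Several of the fixes above (the gemini, nixge, ionic and mlx5e_alloc_rq ones in
particular) share one shape: an error path that returns early without releasing
something a previous step already acquired. The convention they restore is the
goto-unwind ladder, where each failure exit releases exactly what was taken
before it, in reverse order. A minimal userspace sketch of that shape —
hypothetical code, not taken from any of the drivers above:

    #include <stdio.h>
    #include <stdlib.h>

    struct port {
        char *buf;
        FILE *log;
    };

    /* Probe-style init: every failure exit undoes, in reverse order,
     * exactly what succeeded before it. Forgetting one rung of this
     * ladder is the leak pattern the patches above fix. */
    static int port_probe(struct port *p)
    {
        int err;

        p->buf = malloc(4096);
        if (!p->buf)
            return -1;

        p->log = fopen("/tmp/port.log", "w");
        if (!p->log) {
            err = -1;
            goto free_buf;
        }

        if (fputs("up\n", p->log) == EOF) {
            err = -1;
            goto close_log;
        }

        return 0;

    close_log:
        fclose(p->log);
    free_buf:
        free(p->buf);
        return err;
    }

    int main(void)
    {
        struct port p;

        return port_probe(&p) ? EXIT_FAILURE : EXIT_SUCCESS;
    }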

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (86 commits)
  vxlan: fix memleak of fdb
  rds: Prevent kernel-infoleak in rds_notify_queue_get()
  net/sched: The error lable position is corrected in ct_init_module
  net/mlx5e: fix bpf_prog reference count leaks in mlx5e_alloc_rq
  net/mlx5e: E-Switch, Specify flow_source for rule with no in_port
  net/mlx5e: E-Switch, Add misc bit when misc fields changed for mirroring
  net/mlx5e: CT: Support restore ipv6 tunnel
  net: gemini: Fix missing clk_disable_unprepare() in error path of gemini_ethernet_port_probe()
  ionic: unlock queue mutex in error path
  atm: fix atm_dev refcnt leaks in atmtcp_remove_persistent
  net: ethernet: mtk_eth_soc: fix MTU warnings
  net: nixge: fix potential memory leak in nixge_probe()
  devlink: ignore -EOPNOTSUPP errors on dumpit
  rxrpc: Fix race between recvmsg and sendmsg on immediate call failure
  MAINTAINERS: Replace Thor Thayer as Altera Triple Speed Ethernet maintainer
  selftests/bpf: fix netdevsim trap_flow_action_cookie read
  ipv6: fix memory leaks on IPV6_ADDRFORM path
  net/bpfilter: Initialize pos in __bpfilter_process_sockopt
  igb: reinit_locked() should be called with rtnl_lock
  e1000e: continue to init PHY even when failed to disable ULP
  ...
Linus Torvalds 2020-08-01 16:47:24 -07:00
commit ac3a0c8472
81 changed files with 783 additions and 452 deletions

View File

@@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to
 leverage the UDP based load balancing capability of different networks.
 MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
-The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
-support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
-a UDP tunnel.
+The Bareudp tunnel module provides a generic L3 encapsulation support for
+tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.

 Special Handling
 ----------------

View File

@@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table:
     - Contains packet traps for packets that should be locally delivered after
       routing, but do not match more specific packet traps (e.g.,
       ``ipv4_bgp``)
+  * - ``external_delivery``
+    - Contains packet traps for packets that should be routed through an
+      external interface (e.g., management interface) that does not belong to
+      the same device (e.g., switch ASIC) as the ingress interface
   * - ``ipv6``
     - Contains packet traps for various IPv6 control packets (e.g., Router
       Advertisements)

View File

@@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
 F: include/linux/mfd/altera-a10sr.h

 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Thor Thayer <thor.thayer@linux.intel.com>
+M: Joyce Ooi <joyce.ooi@intel.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/altera/

View File

@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
         return -EMEDIUMTYPE;
     }
     dev_data = PRIV(dev);
-    if (!dev_data->persist) return 0;
+    if (!dev_data->persist) {
+        atm_dev_put(dev);
+        return 0;
+    }
     dev_data->persist = 0;
-    if (PRIV(dev)->vcc) return 0;
+    if (PRIV(dev)->vcc) {
+        atm_dev_put(dev);
+        return 0;
+    }
     kfree(dev_data);
     atm_dev_put(dev);
     atm_dev_deregister(dev);

View File

@@ -407,19 +407,34 @@ free_dst:
     return err;
 }

+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
+{
+    if (bareudp->ethertype == proto)
+        return true;
+
+    if (!bareudp->multi_proto_mode)
+        return false;
+
+    if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
+        proto == htons(ETH_P_MPLS_MC))
+        return true;
+
+    if (bareudp->ethertype == htons(ETH_P_IP) &&
+        proto == htons(ETH_P_IPV6))
+        return true;
+
+    return false;
+}
+
 static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
     struct bareudp_dev *bareudp = netdev_priv(dev);
     struct ip_tunnel_info *info = NULL;
     int err;

-    if (skb->protocol != bareudp->ethertype) {
-        if (!bareudp->multi_proto_mode ||
-            (skb->protocol != htons(ETH_P_MPLS_MC) &&
-             skb->protocol != htons(ETH_P_IPV6))) {
-            err = -EINVAL;
-            goto tx_error;
-        }
-    }
+    if (!bareudp_proto_valid(bareudp, skb->protocol)) {
+        err = -EINVAL;
+        goto tx_error;
+    }

     info = skb_tunnel_info(skb);

View File

@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
     port->reset = devm_reset_control_get_exclusive(dev, NULL);
     if (IS_ERR(port->reset)) {
         dev_err(dev, "no reset\n");
+        clk_disable_unprepare(port->pclk);
         return PTR_ERR(port->reset);
     }
     reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
                IRQF_SHARED,
                port_names[port->id],
                port);
-    if (ret)
+    if (ret) {
+        clk_disable_unprepare(port->pclk);
         return ret;
+    }

     ret = register_netdev(netdev);
     if (!ret) {

View File

@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
     int k, sizeoflast;
     dma_addr_t dma;

-    if (type == DESC_TYPE_SKB) {
-        struct sk_buff *skb = (struct sk_buff *)priv;
-        int ret;
-
-        ret = hns3_fill_skb_desc(ring, skb, desc);
-        if (unlikely(ret < 0))
-            return ret;
-
-        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-    } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+    if (type == DESC_TYPE_FRAGLIST_SKB ||
+        type == DESC_TYPE_SKB) {
         struct sk_buff *skb = (struct sk_buff *)priv;

         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
     next_to_use_head = ring->next_to_use;

+    ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+    if (unlikely(ret < 0))
+        goto fill_err;
+
     ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
     if (unlikely(ret < 0))
         goto fill_err;
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
         return;

     if (linkup) {
-        netif_carrier_on(netdev);
         netif_tx_wake_all_queues(netdev);
+        netif_carrier_on(netdev);
         if (netif_msg_link(handle))
             netdev_info(netdev, "link up\n");
     } else {

View File

@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
     /* to avoid rule conflict, when user configure rule by ethtool,
      * we need to clear all arfs rules
      */
+    spin_lock_bh(&hdev->fd_rule_lock);
     hclge_clear_arfs_rules(handle);

-    spin_lock_bh(&hdev->fd_rule_lock);
     ret = hclge_fd_config_rule(hdev, rule);

     spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
     return ret;
 }

+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
                      bool clear_list)
 {
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
     if (!hnae3_dev_fd_supported(hdev))
         return;

-    spin_lock_bh(&hdev->fd_rule_lock);
     for_each_set_bit(location, hdev->fd_bmap,
              hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
         bitmap_zero(hdev->fd_bmap,
                 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
     }
-
-    spin_unlock_bh(&hdev->fd_rule_lock);
 }

 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
                       u16 flow_id, struct flow_keys *fkeys)
 {
     struct hclge_vport *vport = hclge_get_vport(handle);
-    struct hclge_fd_rule_tuples new_tuples;
+    struct hclge_fd_rule_tuples new_tuples = {};
     struct hclge_dev *hdev = vport->back;
     struct hclge_fd_rule *rule;
     u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
     if (!hnae3_dev_fd_supported(hdev))
         return -EOPNOTSUPP;

-    memset(&new_tuples, 0, sizeof(new_tuples));
-    hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
-    spin_lock_bh(&hdev->fd_rule_lock);
-
     /* when there is already fd rule existed add by user,
      * arfs should not work
      */
+    spin_lock_bh(&hdev->fd_rule_lock);
     if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
         spin_unlock_bh(&hdev->fd_rule_lock);
         return -EOPNOTSUPP;
     }

+    hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
     /* check is there flow director filter existed for this flow,
      * if not, create a new filter for it;
      * if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
 #endif
 }

+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
     hdev->fd_en = enable;
     clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
-    if (!enable)
+
+    if (!enable) {
+        spin_lock_bh(&hdev->fd_rule_lock);
         hclge_del_all_fd_entries(handle, clear);
-    else
+        spin_unlock_bh(&hdev->fd_rule_lock);
+    } else {
         hclge_restore_fd_entries(handle);
+    }
 }

 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
     int i;

     set_bit(HCLGE_STATE_DOWN, &hdev->state);
+    spin_lock_bh(&hdev->fd_rule_lock);
     hclge_clear_arfs_rules(handle);
+    spin_unlock_bh(&hdev->fd_rule_lock);

     /* If it is not PF reset, the firmware will disable the MAC,
      * so it only need to stop phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
     bool writen_to_tbl = false;
     int ret = 0;

-    /* When device is resetting, firmware is unable to handle
-     * mailbox. Just record the vlan id, and remove it after
+    /* When device is resetting or reset failed, firmware is unable to
+     * handle mailbox. Just record the vlan id, and remove it after
      * reset finished.
      */
-    if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+    if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+         test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
         set_bit(vlan_id, vport->vlan_del_fail_bmap);
         return -EBUSY;
     }
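The hclge hunks above all serve one change: fd_rule_lock moves out of the
helpers and into their callers, so clearing the old aRFS rules and installing
the new rule happen inside a single critical section (the added comments mark
helpers that now expect the lock to be held). A minimal pthreads analogue of
that caller-holds-the-lock convention, with hypothetical names rather than the
driver's API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rule_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_rules;

    /* Must be called with rule_lock held -- the analogue of the
     * "make sure being called after lock up with fd_rule_lock"
     * comments added above. */
    static void clear_rules_locked(void)
    {
        nr_rules = 0;
    }

    static void add_rule(void)
    {
        /* One critical section covers clear + install, so no other
         * thread can slip a rule in between the two steps. */
        pthread_mutex_lock(&rule_lock);
        clear_rules_locked();
        nr_rules++;
        pthread_mutex_unlock(&rule_lock);
    }

    int main(void)
    {
        add_rule();
        printf("%d rule(s) installed\n", nr_rules);
        return 0;
    }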

View File

@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
     if (proto != htons(ETH_P_8021Q))
         return -EPROTONOSUPPORT;

-    /* When device is resetting, firmware is unable to handle
-     * mailbox. Just record the vlan id, and remove it after
+    /* When device is resetting or reset failed, firmware is unable to
+     * handle mailbox. Just record the vlan id, and remove it after
      * reset finished.
      */
-    if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+    if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+         test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
         set_bit(vlan_id, hdev->vlan_del_fail_bmap);
         return -EBUSY;
     }
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 {
     struct hnae3_handle *nic = &hdev->nic;
     struct hclge_vf_to_pf_msg send_msg;
+    int ret;

     rtnl_lock();
-    hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
-    rtnl_unlock();
+
+    if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+        test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+        dev_warn(&hdev->pdev->dev,
+             "is resetting when updating port based vlan info\n");
+        rtnl_unlock();
+        return;
+    }
+
+    ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+    if (ret) {
+        rtnl_unlock();
+        return;
+    }

     /* send msg to PF and wait update port based vlan info */
     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                    HCLGE_MBX_PORT_BASE_VLAN_CFG);
     memcpy(send_msg.data, port_base_vlan_info, data_size);
-    hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+    ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+    if (!ret) {
+        if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+            nic->port_base_vlan_state = state;
+        else
+            nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+    }

-    if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
-        nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
-    else
-        nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
-
-    rtnl_lock();
     hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
     rtnl_unlock();
 }

View File

@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
 req_tx_irq_failed:
     for (j = 0; j < i; j++) {
         free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-        irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+        irq_dispose_mapping(adapter->tx_scrq[j]->irq);
     }
     release_sub_crqs(adapter, 1);
     return rc;

View File

@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
      */
     hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
     ret_val = e1000_disable_ulp_lpt_lp(hw, true);
-    if (ret_val) {
+    if (ret_val)
         e_warn("Failed to disable ULP\n");
-        goto out;
-    }

     ret_val = hw->phy.ops.acquire(hw);
     if (ret_val) {

View File

@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
     struct igb_adapter *adapter;
     adapter = container_of(work, struct igb_adapter, reset_task);

+    rtnl_lock();
+    /* If we're already down or resetting, just bail */
+    if (test_bit(__IGB_DOWN, &adapter->state) ||
+        test_bit(__IGB_RESETTING, &adapter->state)) {
+        rtnl_unlock();
+        return;
+    }
+
     igb_dump(adapter);
     netdev_err(adapter->netdev, "Reset adapter\n");
     igb_reinit_locked(adapter);
+    rtnl_unlock();
 }

 /**

View File

@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
     if (!netif_running(pf->netdev))
         return;

+    rtnl_lock();
     otx2_stop(pf->netdev);
     pf->reset_count++;
     otx2_open(pf->netdev);
     netif_trans_update(pf->netdev);
+    rtnl_unlock();
 }

 static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)

     pf = netdev_priv(netdev);

+    cancel_work_sync(&pf->reset_task);
     /* Disable link notifications */
     otx2_cgx_config_linkevents(pf, false);

View File

@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)

     vf = netdev_priv(netdev);

+    cancel_work_sync(&vf->reset_task);
+
     unregister_netdev(netdev);
     otx2vf_disable_mbox_intr(vf);
     otx2_detach_resources(&vf->mbox);

View File

@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
     return 0;
 }

-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+                   phy_interface_t interface, int speed)
 {
     u32 val;
+    int ret;
+
+    if (interface == PHY_INTERFACE_MODE_TRGMII) {
+        mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+        val = 500000000;
+        ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+        if (ret)
+            dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+        return;
+    }

     val = (speed == SPEED_1000) ?
         INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
     mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
                               state->interface))
                     goto err_phy;
             } else {
-                if (state->interface !=
-                    PHY_INTERFACE_MODE_TRGMII)
-                    mtk_gmac0_rgmii_adjust(mac->hw,
-                                   state->speed);
+                mtk_gmac0_rgmii_adjust(mac->hw,
+                               state->interface,
+                               state->speed);

                 /* mt7623_pad_clk_setup */
                 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
     eth->netdev[id]->irq = eth->irq[0];
     eth->netdev[id]->dev.of_node = np;

+    eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
     return 0;

 free_netdev:

View File

@@ -4356,12 +4356,14 @@ end:
 static void mlx4_shutdown(struct pci_dev *pdev)
 {
     struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+    struct mlx4_dev *dev = persist->dev;

     mlx4_info(persist->dev, "mlx4_shutdown was called\n");
     mutex_lock(&persist->interface_state_mutex);
     if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
         mlx4_unload_one(pdev);
     mutex_unlock(&persist->interface_state_mutex);
+    mlx4_pci_disable_device(dev);
 }

 static const struct pci_error_handlers mlx4_err_handler = {

View File

@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,

 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
 {
-    struct mlx5e_priv *priv = netdev_priv(netdev);
-    struct mlx5e_rep_priv *rpriv = priv->ppriv;
+    struct mlx5e_rep_priv *rpriv;
+    struct mlx5e_priv *priv;

     /* A given netdev is not a representor or not a slave of LAG configuration */
     if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
         return false;

+    priv = netdev_priv(netdev);
+    rpriv = priv->ppriv;
+
     /* Egress acl forward to vport is supported only non-uplink representor */
     return rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }

View File

@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
         }
     }

-    tun_dst = tun_rx_dst(enc_opts.key.len);
-    if (!tun_dst) {
-        WARN_ON_ONCE(true);
+    if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+        tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+                       key.enc_ip.tos, key.enc_ip.ttl,
+                       key.enc_tp.dst, TUNNEL_KEY,
+                       key32_to_tunnel_id(key.enc_key_id.keyid),
+                       enc_opts.key.len);
+    } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+        tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+                         key.enc_ip.tos, key.enc_ip.ttl,
+                         key.enc_tp.dst, 0, TUNNEL_KEY,
+                         key32_to_tunnel_id(key.enc_key_id.keyid),
+                         enc_opts.key.len);
+    } else {
+        netdev_dbg(priv->netdev,
+               "Couldn't restore tunnel, unsupported addr_type: %d\n",
+               key.enc_control.addr_type);
         return false;
     }

-    ip_tunnel_key_init(&tun_dst->u.tun_info.key,
-               key.enc_ipv4.src, key.enc_ipv4.dst,
-               key.enc_ip.tos, key.enc_ip.ttl,
-               0, /* label */
-               key.enc_tp.src, key.enc_tp.dst,
-               key32_to_tunnel_id(key.enc_key_id.keyid),
-               TUNNEL_KEY);
+    if (!tun_dst) {
+        netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
+        return false;
+    }
+
+    tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

     if (enc_opts.key.len)
         ip_tunnel_info_opts_set(&tun_dst->u.tun_info,

View File

@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
     }

+    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
     return 0;
 }

View File

@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
              gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
     }

+    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
     return 0;
 }

View File

@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
     MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
          be32_to_cpu(enc_keyid.key->keyid));

+    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
     return 0;
 }

View File

@@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
         err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
                     &rq->wq_ctrl);
         if (err)
-            return err;
+            goto err_rq_wq_destroy;

         rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
         err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
                      &rq->wq_ctrl);
         if (err)
-            return err;
+            goto err_rq_wq_destroy;

         rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
     priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
 }

+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+                     enum mlx5_port_status state)
+{
+    struct mlx5_eswitch *esw = mdev->priv.eswitch;
+    int vport_admin_state;
+
+    mlx5_set_port_admin_status(mdev, state);
+
+    if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+        return;
+
+    if (state == MLX5_PORT_UP)
+        vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+    else
+        vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+    mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
 int mlx5e_open_locked(struct net_device *netdev)
 {
     struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev)
     mutex_lock(&priv->state_lock);
     err = mlx5e_open_locked(netdev);
     if (!err)
-        mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+        mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
     mutex_unlock(&priv->state_lock);

     return err;
@@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
         return -ENODEV;

     mutex_lock(&priv->state_lock);
-    mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+    mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
     err = mlx5e_close_locked(netdev);
     mutex_unlock(&priv->state_lock);
@@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)

     /* Marking the link as currently not needed by the Driver */
     if (!netif_running(netdev))
-        mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+        mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);

     mlx5e_set_netdev_mtu_boundaries(priv);
     mlx5e_set_dev_port_mtu(priv);
@@ -5390,6 +5409,8 @@ err_cleanup_tx:
     profile->cleanup_tx(priv);

 out:
+    set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+    cancel_work_sync(&priv->update_stats_work);
     return err;
 }

View File

@@ -936,6 +936,7 @@ err_close_drop_rq:
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
     mlx5e_ethtool_cleanup_steering(priv);
+    rep_vport_rx_rule_destroy(priv);
     mlx5e_destroy_rep_root_ft(priv);
     mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
     mlx5e_rep_tc_enable(priv);

+    mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+                      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
     mlx5_lag_add(mdev, netdev);
     priv->events_nb.notifier_call = uplink_rep_async_event;
     mlx5_notifier_register(mdev, &priv->events_nb);
View File

@@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                  match.key->vlan_priority);

             *match_level = MLX5_MATCH_L2;
+            spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
         }
     }

View File

@@ -1608,7 +1608,7 @@ abort:
         mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
         mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
     }
+    esw_destroy_tsar(esw);

     return err;
 }
@@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
     else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
         esw_offloads_disable(esw);

-    esw_destroy_tsar(esw);
-
     old_mode = esw->mode;
     esw->mode = MLX5_ESWITCH_NONE;
@@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
         mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
         mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
     }
+    esw_destroy_tsar(esw);
+
     if (clear_vf)
         mlx5_eswitch_clear_vf_vports_info(esw);
 }
@@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                  u16 vport, int link_state)
 {
     struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+    int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+    int other_vport = 1;
     int err = 0;

     if (!ESW_ALLOWED(esw))
@@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
     if (IS_ERR(evport))
         return PTR_ERR(evport);

+    if (vport == MLX5_VPORT_UPLINK) {
+        opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+        other_vport = 0;
+        vport = 0;
+    }
+
     mutex_lock(&esw->state_lock);

-    err = mlx5_modify_vport_admin_state(esw->dev,
-                        MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
-                        vport, 1, link_state);
+    err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
     if (err) {
-        mlx5_core_warn(esw->dev,
-                   "Failed to set vport %d link state, err = %d",
-                   vport, err);
+        mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+                   vport, opmod, err);
         goto unlock;
     }
@@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
     struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
     int err = 0;

-    if (!ESW_ALLOWED(esw))
-        return -EPERM;
     if (IS_ERR(evport))
         return PTR_ERR(evport);
     if (vlan > 4095 || qos > 7)
@@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
     u8 set_flags = 0;
     int err;

+    if (!ESW_ALLOWED(esw))
+        return -EPERM;
+
     if (vlan || qos)
         set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

View File

@@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
 static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 {
     return ERR_PTR(-EOPNOTSUPP);

View File

@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
     return &esw->offloads.vport_reps[idx];
 }

+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+                  struct mlx5_flow_spec *spec,
+                  struct mlx5_esw_flow_attr *attr)
+{
+    if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+        attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+        spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
+
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
              mlx5_eswitch_get_vport_metadata_mask());

         spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-        if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
-            spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
     } else {
         misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
         MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,

         spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
     }
-
-    if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
-        attr->in_rep->vport == MLX5_VPORT_UPLINK)
-        spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
 }

 struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         goto err_esw_get;
     }

+    mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
     if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
         rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
                              &flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
     i++;

     mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+    mlx5_eswitch_set_rule_flow_source(esw, spec, attr);

     if (attr->outer_match_level != MLX5_MATCH_NONE)
         spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

View File

@@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
     return ft;
 }

-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
  * prio in the tree, else return the last flow table in the previous priority
  * of prio in the tree.
  */
@@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
     return find_closest_ft(prio, true);
 }

-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
-                    struct mlx5_flow_namespace *ns)
-{
-    struct mlx5_flow_namespace *root_ns = &root->ns;
-    struct fs_prio *iter_prio;
-    struct fs_prio *prio;
-
-    fs_get_obj(prio, ns->node.parent);
-    list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
-        if (iter_prio == prio &&
-            !list_is_last(&prio->node.children, &iter_prio->node.list))
-            return list_next_entry(iter_prio, node.list);
-    }
-    return NULL;
-}
-
 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
                         struct mlx5_flow_act *flow_act)
 {
-    struct mlx5_flow_root_namespace *root = find_root(&ft->node);
     struct fs_prio *prio;
+    bool next_ns;

-    if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
-        prio = find_fwd_ns_prio(root, ft->ns);
-    else
-        fs_get_obj(prio, ft->node.parent);
+    next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+    fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

-    return (prio) ? find_next_chained_ft(prio) : NULL;
+    return find_next_chained_ft(prio);
 }

 static int connect_fts_in_prio(struct mlx5_core_dev *dev,

View File

@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
     if (rq->extts.index >= clock->ptp_info.n_pins)
         return -EINVAL;

+    pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+    if (pin < 0)
+        return -EBUSY;
+
     if (on) {
-        pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
-        if (pin < 0)
-            return -EBUSY;
         pin_mode = MLX5_PIN_MODE_IN;
         pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
         field_select = MLX5_MTPPS_FS_PIN_MODE |
                    MLX5_MTPPS_FS_PATTERN |
                    MLX5_MTPPS_FS_ENABLE;
     } else {
-        pin = rq->extts.index;
         field_select = MLX5_MTPPS_FS_ENABLE;
     }
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
     if (rq->perout.index >= clock->ptp_info.n_pins)
         return -EINVAL;

-    if (on) {
-        pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
-                   rq->perout.index);
-        if (pin < 0)
-            return -EBUSY;
+    pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+               rq->perout.index);
+    if (pin < 0)
+        return -EBUSY;

+    if (on) {
         pin_mode = MLX5_PIN_MODE_OUT;
         pattern = MLX5_OUT_PATTERN_PERIODIC;
         ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                    MLX5_MTPPS_FS_ENABLE |
                    MLX5_MTPPS_FS_TIME_STAMP;
     } else {
-        pin = rq->perout.index;
         field_select = MLX5_MTPPS_FS_ENABLE;
     }
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
     return 0;
 }

+enum {
+    MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+    MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                enum ptp_pin_function func, unsigned int chan)
 {
-    return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+    struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+                        ptp_info);
+
+    switch (func) {
+    case PTP_PF_NONE:
+        return 0;
+    case PTP_PF_EXTTS:
+        return !(clock->pps_info.pin_caps[pin] &
+             MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+    case PTP_PF_PEROUT:
+        return !(clock->pps_info.pin_caps[pin] &
+             MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+    default:
+        return -EOPNOTSUPP;
+    }
+
+    return -EOPNOTSUPP;
 }

 static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
     .verify = NULL,
 };

+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+                     u32 *mtpps, u32 mtpps_size)
+{
+    u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+    MLX5_SET(mtpps_reg, in, pin, pin);
+
+    return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+                    mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+    struct mlx5_core_dev *mdev = clock->mdev;
+    u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+    u8 mode;
+    int err;
+
+    err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+    if (err || !MLX5_GET(mtpps_reg, out, enable))
+        return PTP_PF_NONE;
+
+    mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+    if (mode == MLX5_PIN_MODE_IN)
+        return PTP_PF_EXTTS;
+    else if (mode == MLX5_PIN_MODE_OUT)
+        return PTP_PF_PEROUT;
+
+    return PTP_PF_NONE;
+}
+
 static int mlx5_init_pin_config(struct mlx5_clock *clock)
 {
     int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
              sizeof(clock->ptp_info.pin_config[i].name),
              "mlx5_pps%d", i);
         clock->ptp_info.pin_config[i].index = i;
-        clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
-        clock->ptp_info.pin_config[i].chan = i;
+        clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+        clock->ptp_info.pin_config[i].chan = 0;
     }

     return 0;

View File

@@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
     err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
                     bulk_list, cb, cb_priv, tid);
     if (err) {
-        kfree(trans);
+        kfree_rcu(trans, rcu);
         return err;
     }
     return 0;
@@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
             break;
         }
     }
-    rcu_read_unlock();
-    if (!found)
+    if (!found) {
+        rcu_read_unlock();
         goto drop;
+    }

     rxl->func(skb, local_port, rxl_item->priv);
+    rcu_read_unlock();
     return;

 drop:

View File

@@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
     MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
     MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
     MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+    MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
     MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
     MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
     MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,

View File

@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
 {
-    /* Packets with link-local destination IP arriving to the router
-     * are trapped to the CPU, so no need to program specific routes
-     * for them. Only allow prefix routes (usually one fe80::/64) so
-     * that packets are trapped for the right reason.
-     */
-    if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
-        (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
-        return true;
-
     /* Multicast routes aren't supported, so ignore them. Neighbour
      * Discovery packets are specifically trapped.
      */
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
     mlxsw_sp->router = router;
     router->mlxsw_sp = mlxsw_sp;

-    router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
-    err = register_inetaddr_notifier(&router->inetaddr_nb);
-    if (err)
-        goto err_register_inetaddr_notifier;
-
-    router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
-    err = register_inet6addr_notifier(&router->inet6addr_nb);
-    if (err)
-        goto err_register_inet6addr_notifier;
-
     INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
     err = __mlxsw_sp_router_init(mlxsw_sp);
     if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
     if (err)
         goto err_neigh_init;

-    mlxsw_sp->router->netevent_nb.notifier_call =
-        mlxsw_sp_router_netevent_event;
-    err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
-    if (err)
-        goto err_register_netevent_notifier;
-
     err = mlxsw_sp_mp_hash_init(mlxsw_sp);
     if (err)
         goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
     if (err)
         goto err_dscp_init;

+    router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+    err = register_inetaddr_notifier(&router->inetaddr_nb);
+    if (err)
+        goto err_register_inetaddr_notifier;
+
+    router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+    err = register_inet6addr_notifier(&router->inet6addr_nb);
+    if (err)
+        goto err_register_inet6addr_notifier;
+
+    mlxsw_sp->router->netevent_nb.notifier_call =
+        mlxsw_sp_router_netevent_event;
+    err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+    if (err)
+        goto err_register_netevent_notifier;
+
     mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
     err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
                     &mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
     return 0;

 err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
     unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
 err_register_netevent_notifier:
+    unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+    unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
     mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
     mlxsw_sp_neigh_fini(mlxsw_sp);
 err_neigh_init:
     mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
 err_rifs_init:
     __mlxsw_sp_router_fini(mlxsw_sp);
 err_router_init:
-    unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
-    unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
     mutex_destroy(&mlxsw_sp->router->lock);
     kfree(mlxsw_sp->router);
     return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
     unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
                 &mlxsw_sp->router->fib_nb);
     unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+    unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+    unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+    mlxsw_core_flush_owq();
     mlxsw_sp_neigh_fini(mlxsw_sp);
     mlxsw_sp_vrs_fini(mlxsw_sp);
     mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
     mlxsw_sp_ipips_fini(mlxsw_sp);
     mlxsw_sp_rifs_fini(mlxsw_sp);
     __mlxsw_sp_router_fini(mlxsw_sp);
-    unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
-    unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
     mutex_destroy(&mlxsw_sp->router->lock);
     kfree(mlxsw_sp->router);
 }

View File

@@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
     {
         .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
     },
+    {
+        .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+    },
 };

 static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -421,6 +424,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
         .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
         .priority = 2,
     },
+    {
+        .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+        .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+        .priority = 1,
+    },
     {
         .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
         .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
@@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
         },
     },
     {
-        .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+        .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
                           TRAP),
         .listeners_arr = {
-            MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
-                      false),
+            MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+                      TRAP_TO_CPU, false),
         },
     },
     {

View File

@@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)

         spin_unlock_irqrestore(&port->tx_skbs.lock, flags);

-        /* Next ts */
-        ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
-
-        /* Get the h/w timestamp */
-        ocelot_get_hwtimestamp(ocelot, &ts);
-
         if (unlikely(!skb_match))
             continue;

+        /* Get the h/w timestamp */
+        ocelot_get_hwtimestamp(ocelot, &ts);
+
         /* Set the timestamp into the skb */
         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
         shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
         skb_tstamp_tx(skb_match, &shhwtstamps);

         dev_kfree_skb_any(skb_match);
+
+        /* Next ts */
+        ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
     }
 }
 EXPORT_SYMBOL(ocelot_get_txtstamp);

View File

@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
     netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
     err = nixge_of_get_resources(pdev);
     if (err)
-        return err;
+        goto free_netdev;
     __nixge_hw_set_mac_address(ndev);

     priv->tx_irq = platform_get_irq_byname(pdev, "tx");
     if (priv->tx_irq < 0) {
         netdev_err(ndev, "could not find 'tx' irq");
-        return priv->tx_irq;
+        err = priv->tx_irq;
+        goto free_netdev;
     }

     priv->rx_irq = platform_get_irq_byname(pdev, "rx");
     if (priv->rx_irq < 0) {
         netdev_err(ndev, "could not find 'rx' irq");
-        return priv->rx_irq;
+        err = priv->rx_irq;
+        goto free_netdev;
     }

     priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;

View File

@@ -2001,7 +2001,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
         netif_device_detach(lif->netdev);
         err = ionic_stop(lif->netdev);
         if (err)
-            return err;
+            goto reset_out;
     }

     if (cb)
@@ -2011,6 +2011,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
         err = ionic_open(lif->netdev);
         netif_device_attach(lif->netdev);
     }

+reset_out:
     mutex_unlock(&lif->queue_lock);

     return err;

View File

@@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,

     minor = get_free_serial_index();
     if (minor < 0)
-        goto exit;
+        goto exit2;

     /* register our minor number */
     serial->parent->dev = tty_port_register_device_attr(&serial->port,
             tty_drv, minor, &serial->parent->interface->dev,
             serial->parent, hso_serial_dev_groups);
+    if (IS_ERR(serial->parent->dev))
+        goto exit2;

     /* fill in specific data for later use */
     serial->minor = minor;
@@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
     return 0;
 exit:
     hso_serial_tty_unregister(serial);
+exit2:
     hso_serial_common_free(serial);
     return -1;
 }

View File

@@ -377,10 +377,6 @@ struct lan78xx_net {
     struct tasklet_struct bh;
     struct delayed_work wq;

-    struct usb_host_endpoint *ep_blkin;
-    struct usb_host_endpoint *ep_blkout;
-    struct usb_host_endpoint *ep_intr;
-
     int msg_enable;

     struct urb *urb_intr;
@@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
     return NETDEV_TX_OK;
 }

-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
-    int tmp;
-    struct usb_host_interface *alt = NULL;
-    struct usb_host_endpoint *in = NULL, *out = NULL;
-    struct usb_host_endpoint *status = NULL;
-
-    for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
-        unsigned ep;
-
-        in = NULL;
-        out = NULL;
-        status = NULL;
-        alt = intf->altsetting + tmp;
-
-        for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
-            struct usb_host_endpoint *e;
-            int intr = 0;
-
-            e = alt->endpoint + ep;
-            switch (e->desc.bmAttributes) {
-            case USB_ENDPOINT_XFER_INT:
-                if (!usb_endpoint_dir_in(&e->desc))
-                    continue;
-                intr = 1;
-                /* FALLTHROUGH */
-            case USB_ENDPOINT_XFER_BULK:
-                break;
-            default:
-                continue;
-            }
-
-            if (usb_endpoint_dir_in(&e->desc)) {
-                if (!intr && !in)
-                    in = e;
-                else if (intr && !status)
-                    status = e;
-            } else {
-                if (!out)
-                    out = e;
-            }
-        }
-
-        if (in && out)
-            break;
-    }
-    if (!alt || !in || !out)
-        return -EINVAL;
-
-    dev->pipe_in = usb_rcvbulkpipe(dev->udev,
-                       in->desc.bEndpointAddress &
-                       USB_ENDPOINT_NUMBER_MASK);
-    dev->pipe_out = usb_sndbulkpipe(dev->udev,
-                    out->desc.bEndpointAddress &
-                    USB_ENDPOINT_NUMBER_MASK);
-    dev->ep_intr = status;
-
-    return 0;
-}
-
 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
 {
     struct lan78xx_priv *pdata = NULL;
     int ret;
     int i;

-    ret = lan78xx_get_endpoints(dev, intf);
-    if (ret) {
-        netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
-                ret);
-        return ret;
-    }
-
     dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

     pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
 static int lan78xx_probe(struct usb_interface *intf,
              const struct usb_device_id *id)
 {
+    struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
     struct lan78xx_net *dev;
     struct net_device *netdev;
     struct usb_device *udev;
@@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,

     mutex_init(&dev->stats.access_lock);

+    if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+        ret = -ENODEV;
+        goto out2;
+    }
+
+    dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+    ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+    if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+        ret = -ENODEV;
+        goto out2;
+    }
+
+    dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+    ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+    if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+        ret = -ENODEV;
+        goto out2;
+    }
+
+    ep_intr = &intf->cur_altsetting->endpoint[2];
+    if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+        ret = -ENODEV;
+        goto out2;
+    }
+
+    dev->pipe_intr = usb_rcvintpipe(dev->udev,
+                    usb_endpoint_num(&ep_intr->desc));
+
     ret = lan78xx_bind(dev, intf);
     if (ret < 0)
         goto out2;
@@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
     netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
     netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

-    dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
-    dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
-    dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
-    dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
-    dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
-    dev->pipe_intr = usb_rcvintpipe(dev->udev,
-                    dev->ep_intr->desc.bEndpointAddress &
-                    USB_ENDPOINT_NUMBER_MASK);
-
-    period = dev->ep_intr->desc.bInterval;
+    period = ep_intr->desc.bInterval;

     maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
     buf = kmalloc(maxp, GFP_KERNEL);
     if (buf) {
@@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
         usb_fill_int_urb(dev->urb_intr, dev->udev,
                  dev->pipe_intr, buf, maxp,
                  intr_complete, dev, period);
+        dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
     }
 }

View File

@@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
     for (h = 0; h < FDB_HASH_SIZE; ++h) {
         struct vxlan_fdb *f;

+        rcu_read_lock();
         hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
             struct vxlan_rdst *rd;
@@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                              cb->nlh->nlmsg_seq,
                              RTM_NEWNEIGH,
                              NLM_F_MULTI, NULL);
-                if (err < 0)
+                if (err < 0) {
+                    rcu_read_unlock();
                     goto out;
+                }
 skip_nh:
                 *idx += 1;
                 continue;
@@ -1403,12 +1406,15 @@ skip_nh:
                              cb->nlh->nlmsg_seq,
                              RTM_NEWNEIGH,
                              NLM_F_MULTI, rd);
-                if (err < 0)
+                if (err < 0) {
+                    rcu_read_unlock();
                     goto out;
+                }
 skip:
                 *idx += 1;
             }
         }
+        rcu_read_unlock();
     }
 out:
     return err;
@@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
             if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
                 continue;
             /* the all_zeros_mac entry is deleted at vxlan_uninit */
-            if (!is_zero_ether_addr(f->eth_addr))
-                vxlan_fdb_destroy(vxlan, f, true, true);
+            if (is_zero_ether_addr(f->eth_addr) &&
+                f->vni == vxlan->cfg.vni)
+                continue;
+
+            vxlan_fdb_destroy(vxlan, f, true, true);
         }
         spin_unlock_bh(&vxlan->hash_lock[h]);
     }
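The vxlan hunks enforce the rule behind fix 9 above: every exit from an RCU
read-side critical section has to drop the read lock, including an early error
return in the middle of the walk. A standalone sketch of the same discipline
using userspace liburcu — a hypothetical list walker, not the kernel code;
build with -lurcu:

    #include <stdio.h>
    #include <urcu.h>          /* rcu_read_lock()/rcu_read_unlock() */
    #include <urcu/rculist.h>  /* cds_list_*_rcu() */

    struct entry {
        int id;
        struct cds_list_head node;
    };

    static CDS_LIST_HEAD(entries);

    static int emit(const struct entry *e)
    {
        /* Treat a negative id as the "netlink buffer full" error. */
        return e->id < 0 ? -1 : printf("entry %d\n", e->id);
    }

    static int dump_entries(void)
    {
        struct entry *e;
        int err;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(e, &entries, node) {
            err = emit(e);
            if (err < 0) {
                /* The bug class fixed above: returning here
                 * without this unlock leaves the thread in a
                 * read-side critical section forever. */
                rcu_read_unlock();
                return err;
            }
        }
        rcu_read_unlock();
        return 0;
    }

    int main(void)
    {
        struct entry a = { .id = 1 };
        struct entry b = { .id = 2 };

        rcu_register_thread();
        cds_list_add_rcu(&a.node, &entries);
        cds_list_add_rcu(&b.node, &entries);
        dump_entries();
        rcu_unregister_thread();
        return 0;
    }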

View File

@@ -4381,6 +4381,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
 enum {
     MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
     MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
+    MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
 };

 struct mlx5_ifc_arm_monitor_counter_in_bits {

View File

@@ -84,7 +84,7 @@ struct bucket_table {
     struct lockdep_map dep_map;

-    struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+    struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };

 /*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                  void *arg);
 void rhashtable_destroy(struct rhashtable *ht);

-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-                       unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-                         unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-                          struct bucket_table *tbl,
-                          unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+    const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+    const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+    struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);

 #define rht_dereference(p, ht) \
     rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
     ({ tpos = container_of(pos, typeof(*tpos), member); 1; })

-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
     const struct bucket_table *tbl, unsigned int hash)
 {
     return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
                      &tbl->buckets[hash];
 }

-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
     struct bucket_table *tbl, unsigned int hash)
 {
     return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
                      &tbl->buckets[hash];
 }

-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
     struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
     return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,7 +324,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */

 static inline void rht_lock(struct bucket_table *tbl,
-                struct rhash_lock_head **bkt)
+                struct rhash_lock_head __rcu **bkt)
 {
     local_bh_disable();
     bit_spin_lock(0, (unsigned long *)bkt);
@@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl,
 }

 static inline void rht_lock_nested(struct bucket_table *tbl,
-                   struct rhash_lock_head **bucket,
+                   struct rhash_lock_head __rcu **bucket,
                    unsigned int subclass)
 {
     local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
 }

 static inline void rht_unlock(struct bucket_table *tbl,
-                  struct rhash_lock_head **bkt)
+                  struct rhash_lock_head __rcu **bkt)
 {
     lock_map_release(&tbl->dep_map);
     bit_spin_unlock(0, (unsigned long *)bkt);
     local_bh_enable();
 }

-static inline struct rhash_head __rcu *__rht_ptr(
-    struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+    struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
 {
-    return (struct rhash_head __rcu *)
-        ((unsigned long)*bkt & ~BIT(0) ?:
+    return (struct rhash_head *)
+        ((unsigned long)p & ~BIT(0) ?:
          (unsigned long)RHT_NULLS_MARKER(bkt));
 }
@@ -365,47 +364,41 @@ static inline struct rhash_head *__rht_ptr(
  * access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr_rcu(
-    struct rhash_lock_head *const *bkt)
+    struct rhash_lock_head __rcu *const *bkt)
 {
-    struct rhash_head __rcu *p = __rht_ptr(bkt);
-
-    return rcu_dereference(p);
+    return __rht_ptr(rcu_dereference(*bkt), bkt);
 }

 static inline struct rhash_head *rht_ptr(
-    struct rhash_lock_head *const *bkt,
+    struct rhash_lock_head __rcu *const *bkt,
     struct bucket_table *tbl,
     unsigned int hash)
 {
-    return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+    return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
 }

 static inline struct rhash_head *rht_ptr_exclusive(
-    struct rhash_lock_head *const *bkt)
+    struct rhash_lock_head __rcu *const *bkt)
 {
-    return rcu_dereference_protected(__rht_ptr(bkt), 1);
+    return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
 }

-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
                      struct rhash_head *obj)
 {
-    struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
     if (rht_is_a_nulls(obj))
         obj = NULL;
-    rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+    rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
 }

 static inline void rht_assign_unlock(struct bucket_table *tbl,
-                     struct rhash_lock_head **bkt,
+                     struct rhash_lock_head __rcu **bkt,
                      struct rhash_head *obj)
 {
-    struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
     if (rht_is_a_nulls(obj))
         obj = NULL;
     lock_map_release(&tbl->dep_map);
-    rcu_assign_pointer(*p, obj);
+    rcu_assign_pointer(*bkt, (void *)obj);
     preempt_enable();
     __release(bitlock);
     local_bh_enable();
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
         .ht = ht,
         .key = key,
     };
-    struct rhash_lock_head *const *bkt;
+    struct rhash_lock_head __rcu *const *bkt;
     struct bucket_table *tbl;
     struct rhash_head *he;
     unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
         .ht = ht,
         .key = key,
     };
-    struct rhash_lock_head **bkt;
+    struct rhash_lock_head __rcu **bkt;
     struct rhash_head __rcu **pprev;
     struct bucket_table *tbl;
     struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
     struct rhash_head *obj, const struct rhashtable_params params,
     bool rhlist)
 {
-    struct rhash_lock_head **bkt;
+    struct rhash_lock_head __rcu **bkt;
     struct rhash_head __rcu **pprev;
     struct rhash_head *he;
     unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
     struct rhash_head *obj_old, struct rhash_head *obj_new,
     const struct rhashtable_params params)
 {
-    struct rhash_lock_head **bkt;
+    struct rhash_lock_head __rcu **bkt;
     struct rhash_head __rcu **pprev;
     struct rhash_head *he;
     unsigned int hash;

View File

@@ -274,6 +274,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
               const struct in6_addr *addr);
 int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
               const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
 void ipv6_sock_ac_close(struct sock *sk);

 int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);

View File

@@ -718,6 +718,7 @@ enum devlink_trap_group_generic_id {
     DEVLINK_TRAP_GROUP_GENERIC_ID_PIM,
     DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB,
     DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY,
+    DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY,
     DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6,
     DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT,
     DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
@@ -915,6 +916,8 @@ enum devlink_trap_group_generic_id {
     "uc_loopback"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \
     "local_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \
+    "external_delivery"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \
     "ipv6"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \

View File

@@ -941,7 +941,7 @@ struct xfrm_dst {
 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
 {
 #ifdef CONFIG_XFRM
-    if (dst->xfrm) {
+    if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
         const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;

         return xdst->path;
@@ -953,7 +953,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
 {
 #ifdef CONFIG_XFRM
-    if (dst->xfrm) {
+    if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
         struct xfrm_dst *xdst = (struct xfrm_dst *) dst;

         return xdst->child;
     }
@@ -1630,13 +1630,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
              void *);
 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
-                      u8 type, int dir,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
+                      const struct xfrm_mark *mark,
+                      u32 if_id, u8 type, int dir,
                       struct xfrm_selector *sel,
                       struct xfrm_sec_ctx *ctx, int delete,
                       int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
-                     int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
+                     const struct xfrm_mark *mark, u32 if_id,
+                     u8 type, int dir, u32 id, int delete,
+                     int *err);
 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
 void xfrm_policy_hash_rebuild(struct net *net);
 u32 xfrm_get_acqseq(void);

View File

@@ -4058,6 +4058,11 @@ static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
     const char *tname, *sym;
     u32 btf_id, i;

+    if (!btf_vmlinux) {
+        bpf_log(log, "btf_vmlinux doesn't exist\n");
+        return -EINVAL;
+    }
+
     if (IS_ERR(btf_vmlinux)) {
         bpf_log(log, "btf_vmlinux is malformed\n");
         return -EINVAL;
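The btf.c hunk works because NULL ("was never set up") and an ERR_PTR-encoded
pointer ("setup ran and failed") are distinct failure states, and IS_ERR()
alone does not catch NULL. A userspace illustration with the two helpers
re-implemented locally — they are stand-ins here, not the kernel headers:

    #include <errno.h>
    #include <stdio.h>

    /* Local stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int the_obj = 42;

    static void *lookup(int mode)
    {
        if (mode == 0)
            return NULL;             /* never initialized */
        if (mode == 1)
            return ERR_PTR(-EINVAL); /* initialized, but failed */
        return &the_obj;
    }

    int main(void)
    {
        for (int mode = 0; mode < 3; mode++) {
            void *p = lookup(mode);

            /* Order matters: IS_ERR(NULL) is false, so checking
             * IS_ERR() alone would let NULL fall through. */
            if (!p)
                puts("doesn't exist");
            else if (IS_ERR(p))
                puts("malformed");
            else
                printf("ok: %d\n", *(int *)p);
        }
        return 0;
    }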

View File

@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
     htab_elem_free(htab, l);
 }

-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 {
     struct bpf_map *map = &htab->map;
+    void *ptr;

     if (map->ops->map_fd_put_ptr) {
-        void *ptr = fd_htab_map_get_ptr(map, l);
-
+        ptr = fd_htab_map_get_ptr(map, l);
         map->ops->map_fd_put_ptr(ptr);
     }
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+    htab_put_fd_value(htab, l);

     if (htab_is_prealloc(htab)) {
         __pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
          */
         pl_new = this_cpu_ptr(htab->extra_elems);
         l_new = *pl_new;
+        htab_put_fd_value(htab, old_elem);
         *pl_new = old_elem;
     } else {
         struct pcpu_freelist_node *l;

View File

@ -31,7 +31,7 @@
union nested_table {
union nested_table __rcu *table;
struct rhash_lock_head *bucket;
struct rhash_lock_head __rcu *bucket;
};
static u32 head_hashfn(struct rhashtable *ht,
@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
}
static int rhashtable_rehash_one(struct rhashtable *ht,
struct rhash_lock_head **bkt,
struct rhash_lock_head __rcu **bkt,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
unsigned int old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
int err;
if (!bkt)
@ -485,7 +485,7 @@ fail:
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
struct rhash_lock_head **bkt,
struct rhash_lock_head __rcu **bkt,
struct bucket_table *tbl, unsigned int hash,
const void *key, struct rhash_head *obj)
{
@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
return ERR_PTR(-ENOENT);
}
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
struct rhash_lock_head **bkt,
struct bucket_table *tbl,
unsigned int hash,
struct rhash_head *obj,
void *data)
static struct bucket_table *rhashtable_insert_one(
struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
void *data)
{
struct bucket_table *new_tbl;
struct rhash_head *head;
@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
{
struct bucket_table *new_tbl;
struct bucket_table *tbl;
struct rhash_lock_head **bkt;
struct rhash_lock_head __rcu **bkt;
unsigned int hash;
void *data;
@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
struct rhash_lock_head __rcu **__rht_bucket_nested(
const struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
struct rhash_lock_head __rcu **rht_bucket_nested(
const struct bucket_table *tbl, unsigned int hash)
{
static struct rhash_lock_head *rhnull;
static struct rhash_lock_head __rcu *rhnull;
if (!rhnull)
INIT_RHT_NULLS_HEAD(rhnull);
@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
struct bucket_table *tbl,
unsigned int hash)
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
unsigned int index = hash & ((1 << tbl->nest) - 1);
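
These rhashtable changes are annotation-only: marking bucket pointers __rcu lets sparse check that they are only touched through RCU accessors, with no change to generated code. A stub sketch of how the annotation attaches to a declaration; the __CHECKER__ definition here is an illustrative stand-in, not the kernel's exact one:

/* Under sparse (__CHECKER__), __rcu moves a pointer into a separate
 * address space so plain dereferences are flagged; for the compiler
 * it expands to nothing.
 */
#ifdef __CHECKER__
#define __rcu __attribute__((noderef, address_space(__rcu)))
#else
#define __rcu
#endif

struct rhash_lock_head;

union nested_table {
        union nested_table __rcu *table;
        struct rhash_lock_head __rcu *bucket;   /* annotated, as in the fix */
};

int main(void)
{
        union nested_table t = { 0 };
        return !!t.table;
}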


@ -39,7 +39,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
{
struct mbox_request req;
struct mbox_reply reply;
loff_t pos;
loff_t pos = 0;
ssize_t n;
int ret = -EFAULT;
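
The initialization matters because the position is passed by pointer to a write helper that both consumes and advances it, so an uninitialized loff_t is stack garbage. A userspace sketch of the same bug class, assuming illustrative names:

#include <stdio.h>

/* Callee reads *pos to decide where to write, then advances it. */
static long write_at(FILE *f, const char *buf, long len, long *pos)
{
        if (fseek(f, *pos, SEEK_SET))
                return -1;
        long n = (long)fwrite(buf, 1, (size_t)len, f);
        *pos += n;
        return n;
}

int main(void)
{
        FILE *f = tmpfile();
        long pos = 0;   /* without "= 0", *pos is indeterminate, as in the bug */

        write_at(f, "req", 3, &pos);
        write_at(f, "uest", 4, &pos);   /* continues at byte 3 */
        printf("pos=%ld\n", pos);       /* prints: pos=7 */
        fclose(f);
        return 0;
}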


@ -202,7 +202,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
/* Advance. */
kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len);
}
/*


@ -1065,7 +1065,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP) {
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -1266,7 +1268,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP) {
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -1498,7 +1502,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP) {
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -3299,7 +3305,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err && err != -EOPNOTSUPP) {
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -3569,7 +3577,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err && err != -EOPNOTSUPP) {
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -4518,7 +4528,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
cb->extack);
mutex_unlock(&devlink->lock);
if (err && err != -EOPNOTSUPP)
if (err == -EOPNOTSUPP)
err = 0;
else if (err)
break;
idx++;
}
@ -8567,6 +8579,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = {
DEVLINK_TRAP_GROUP(PIM),
DEVLINK_TRAP_GROUP(UC_LB),
DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
DEVLINK_TRAP_GROUP(IPV6),
DEVLINK_TRAP_GROUP(PTP_EVENT),
DEVLINK_TRAP_GROUP(PTP_GENERAL),
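
Most of the devlink hunks above apply one rule: when a driver callback in a dump returns -EOPNOTSUPP, the object simply has nothing to report, so the dump should skip it and continue rather than abort. A sketch of the pattern with an illustrative getter:

#include <errno.h>
#include <stdio.h>

/* Illustrative getter: odd entries don't implement the op. */
static int get_entry(int i)
{
        return (i % 2) ? -EOPNOTSUPP : i * 10;
}

int main(void)
{
        for (int i = 0; i < 6; i++) {
                int err = get_entry(i);

                if (err == -EOPNOTSUPP)
                        continue;       /* unsupported: skip, keep dumping */
                if (err < 0)
                        return 1;       /* real error: abort the dump */
                printf("entry %d -> %d\n", i, err);
        }
        return 0;
}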


@ -1864,7 +1864,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
struct key_vector *local_l = NULL, *local_tp;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
hlist_for_each_entry(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
if (local_tb->tb_id != fa->tb_id)


@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return 0;
}
void ipv6_sock_ac_close(struct sock *sk)
void __ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net_device *dev = NULL;
@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk)
struct net *net = sock_net(sk);
int prev_index;
if (!np->ipv6_ac_list)
return;
rtnl_lock();
ASSERT_RTNL();
pac = np->ipv6_ac_list;
np->ipv6_ac_list = NULL;
@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
}
void ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
if (!np->ipv6_ac_list)
return;
rtnl_lock();
__ipv6_sock_ac_close(sk);
rtnl_unlock();
}
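
The split follows a common kernel convention: the double-underscore variant assumes the caller holds the lock (hence ASSERT_RTNL()), while the plain name is the self-locking wrapper. The IPV6_ADDRFORM path, which already holds rtnl, can then call __ipv6_sock_ac_close() without deadlocking. A pthread sketch of the convention, names illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resource = 42;

/* Caller must hold "lock" (kernel code would ASSERT_RTNL() here). */
static void __release_resource(void)
{
        resource = 0;
}

/* Self-locking wrapper for callers that don't hold the lock. */
static void release_resource(void)
{
        pthread_mutex_lock(&lock);
        __release_resource();
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_mutex_lock(&lock);      /* a path that already holds it... */
        __release_resource();           /* ...calls the __ variant directly */
        pthread_mutex_unlock(&lock);

        release_resource();             /* everyone else takes the wrapper */
        printf("resource=%d\n", resource);
        return 0;
}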


@ -805,10 +805,17 @@ int esp6_input_done2(struct sk_buff *skb, int err)
if (x->encap) {
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
int offset = skb_network_offset(skb) + sizeof(*ip6h);
struct xfrm_encap_tmpl *encap = x->encap;
struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len);
struct tcphdr *th = (void *)(skb_network_header(skb) + hdr_len);
__be16 source;
u8 nexthdr = ip6h->nexthdr;
__be16 frag_off, source;
struct udphdr *uh;
struct tcphdr *th;
offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
uh = (void *)(skb->data + offset);
th = (void *)(skb->data + offset);
hdr_len += offset;
switch (x->encap->encap_type) {
case TCP_ENCAP_ESPINTCP:
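
The fix replaces a fixed sizeof(*ip6h) offset with ipv6_skip_exthdr(), because the encapsulating UDP or TCP header sits after any IPv6 extension headers, not necessarily right after the 40-byte fixed header. A simplified userspace walker as a sketch, handling a single extension-header type with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_UDP     17
#define NEXTHDR_DEST    60      /* destination options extension header */

static int skip_exthdr(const uint8_t *pkt, int off, uint8_t *nexthdr)
{
        while (*nexthdr == NEXTHDR_DEST) {      /* one type handled here */
                *nexthdr = pkt[off];            /* ext hdr: next-header byte */
                off += (pkt[off + 1] + 1) * 8;  /* hdr ext len, 8-octet units */
        }
        return off;                             /* offset of the L4 header */
}

int main(void)
{
        uint8_t pkt[64] = { 0 };
        uint8_t nexthdr = NEXTHDR_DEST;         /* from the IPv6 header */

        pkt[40] = NEXTHDR_UDP;                  /* ext hdr at offset 40 */
        pkt[41] = 0;                            /* length: (0 + 1) * 8 bytes */

        int off = skip_exthdr(pkt, 40, &nexthdr);
        printf("l4 off=%d nexthdr=%u\n", off, nexthdr); /* prints: 48 17 */
        return 0;
}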


@ -240,6 +240,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
fl6_free_socklist(sk);
__ipv6_sock_mc_close(sk);
__ipv6_sock_ac_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so


@ -3685,14 +3685,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->fib6_src.plen = cfg->fc_src_len;
#endif
if (nh) {
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
goto out;
}
if (rt->fib6_src.plen) {
NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
goto out;
}
if (!nexthop_get(nh)) {
NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
goto out;
}
rt->nh = nh;
fib6_nh = nexthop_fib6_nh(rt->nh);
} else {


@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
if ((xfilter->sadb_x_filter_splen >=
(sizeof(xfrm_address_t) << 3)) ||
(xfilter->sadb_x_filter_dplen >=
(sizeof(xfrm_address_t) << 3))) {
mutex_unlock(&pfk->dump_lock);
return -EINVAL;
}
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL) {
mutex_unlock(&pfk->dump_lock);
@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
return err;
}
xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
1, &err);
security_xfrm_policy_free(pol_ctx);
@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
return -EINVAL;
delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
dir, pol->sadb_x_policy_id, delete, &err);
if (xp == NULL)
return -ENOENT;
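
The added bounds check compares the filter's source and destination prefix lengths against the address width in bits, sizeof(xfrm_address_t) << 3, before they can be used to index past the address. A sketch of that validation with a stand-in address type:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct addr16 { uint8_t a[16]; };       /* stand-in for xfrm_address_t */

static int check_filter(unsigned int splen, unsigned int dplen)
{
        /* sizeof(addr) << 3 == address width in bits (128 here) */
        if (splen >= (sizeof(struct addr16) << 3) ||
            dplen >= (sizeof(struct addr16) << 3))
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_filter(64, 24), check_filter(200, 0));
        /* prints: 0 -22 */
        return 0;
}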


@ -2166,6 +2166,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
ieee80211_stop_mesh(sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
kfree(sdata->u.mesh.ie);
mutex_unlock(&sdata->local->mtx);
return 0;


@ -617,6 +617,19 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
iftd = ieee80211_get_sband_iftype_data(sband,
NL80211_IFTYPE_MESH_POINT);
/* The device doesn't support HE in mesh mode or at all */
if (!iftd)
return 0;
ieee80211_ie_build_he_6ghz_cap(sdata, skb);
return 0;
}


@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
del_timer_sync(&mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries);
mesh_path_flush_pending(mpath);
kfree_rcu(mpath, rcu);
}


@ -1923,9 +1923,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
if (sta) {
tx_pending = atomic_sub_return(tx_airtime,
&sta->airtime[ac].aql_tx_pending);
if (WARN_ONCE(tx_pending < 0,
"STA %pM AC %d txq pending airtime underflow: %u, %u",
sta->addr, ac, tx_pending, tx_airtime))
if (tx_pending < 0)
atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
tx_pending, 0);
}


@ -4230,11 +4230,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
goto out_free;
memset(info, 0, sizeof(*info));
if (unlikely(!multicast && skb->sk &&
skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
ieee80211_store_ack_skb(local, skb, &info->flags, NULL);
memset(info, 0, sizeof(*info));
info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
&info->flags, NULL);
if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
if (sdata->control_port_no_encrypt)


@ -2878,6 +2878,10 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!iftd))
return;
/* Check for device HE 6 GHz capability before adding element */
if (!iftd->he_6ghz_capa.capa)
return;
cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;


@ -1833,7 +1833,7 @@ do_connect:
/* on successful connect, the msk state will be moved to established by
* subflow_finish_connect()
*/
if (!err || err == EINPROGRESS)
if (!err || err == -EINPROGRESS)
mptcp_copy_inaddrs(sock->sk, ssock->sk);
else
inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
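
The one-character fix hinges on the kernel's errno convention: in-kernel calls report failure as a negative errno, so comparing against positive EINPROGRESS never matched and the address copy was skipped. A trivial illustration:

#include <errno.h>
#include <stdio.h>

/* Kernel-style call: failure is reported as a negative errno. */
static int kernel_style_connect(void)
{
        return -EINPROGRESS;
}

int main(void)
{
        int err = kernel_style_connect();

        printf("err == EINPROGRESS:  %d\n", err == EINPROGRESS);  /* 0, never true */
        printf("err == -EINPROGRESS: %d\n", err == -EINPROGRESS); /* 1 */
        return 0;
}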


@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
struct rds_rdma_notify cmsg;
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
int err = 0;
memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
/* put_cmsg copies to user space and thus may sleep. We can't do this
* with rs_lock held, so first grab as many notifications as we can stuff
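
The switch to memset() closes the infoleak: an initializer such as { 0 } zeroes the named members, but the C standard leaves the padding bytes between them unspecified, and put_cmsg() copies the whole struct, holes included, to user space. A sketch of the distinction:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct notify {
        uint8_t  type;          /* padding bytes typically follow */
        uint64_t status;
};

int main(void)
{
        struct notify a = { 0 };        /* members zeroed; padding unspecified */
        struct notify b;

        memset(&b, 0, sizeof(b));       /* every byte cleared, holes included */
        printf("struct=%zu bytes, members=%zu, padding=%zu\n",
               sizeof(a), sizeof(b.type) + sizeof(b.status),
               sizeof(a) - sizeof(b.type) - sizeof(b.status));
        return 0;
}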


@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
*/
ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
goto error_attached_to_socket;
trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
atomic_read(&call->usage), here, NULL);
@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
ret = -EEXIST;
error:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
RX_CALL_DEAD, -EEXIST);
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
atomic_read(&call->usage), here, ERR_PTR(ret));
atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
return ERR_PTR(ret);
_leave(" = -EEXIST");
return ERR_PTR(-EEXIST);
/* We got an error, but the call is attached to the socket and is in
* need of release. However, we might now race with recvmsg() when
* completing the call queues it. Return 0 from sys_sendmsg() and
* leave the error to recvmsg() to deal with.
*/
error_attached_to_socket:
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
atomic_read(&call->usage), here, ERR_PTR(ret));
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
_leave(" = c=%08x [err]", call->debug_id);
return call;
}
/*


@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
spin_lock_bh(&conn->params.peer->lock);
hlist_del_rcu(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
if (!hlist_unhashed(&call->error_link)) {
spin_lock_bh(&call->peer->lock);
hlist_del_rcu(&call->error_link);
spin_unlock_bh(&call->peer->lock);
}
if (rxrpc_is_client_call(call))
return rxrpc_disconnect_client_call(call);


@ -620,7 +620,7 @@ try_again:
goto error_unlock_call;
}
if (msg->msg_name) {
if (msg->msg_name && call->peer) {
struct sockaddr_rxrpc *srx = msg->msg_name;
size_t len = sizeof(call->peer->srx);


@ -681,6 +681,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
ret = 0;
if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
goto out_put_unlock;
} else {
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_UNINITIALISED:


@ -1543,10 +1543,10 @@ static int __init ct_init_module(void)
return 0;
err_tbl_init:
destroy_workqueue(act_ct_wq);
err_register:
tcf_ct_flow_tables_uninit();
err_tbl_init:
destroy_workqueue(act_ct_wq);
return err;
}
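
The label swap restores the usual unwind discipline: error labels must appear in reverse order of the setup steps, so each goto undoes exactly what was already set up. A sketch of the fixed shape, with illustrative step functions:

#include <stdio.h>

static int setup_wq(void)       { puts("wq up");         return 0; }
static int setup_tables(void)   { puts("tables up");     return 0; }
static int do_register(void)    { puts("register FAIL"); return -1; }
static void uninit_tables(void) { puts("tables down"); }
static void destroy_wq(void)    { puts("wq down"); }

static int init_module_sketch(void)
{
        int err;

        err = setup_wq();
        if (err)
                return err;
        err = setup_tables();
        if (err)
                goto err_tbl_init;
        err = do_register();
        if (err)
                goto err_register;
        return 0;

err_register:
        uninit_tables();        /* undo step 2 */
err_tbl_init:
        destroy_wq();           /* undo step 1; labels in reverse order */
        return err;
}

int main(void)
{
        return init_module_sketch() ? 1 : 0;
}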


@ -13266,13 +13266,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
if (!wdev_running(wdev))
return -ENETDOWN;
}
if (!vcmd->doit)
return -EOPNOTSUPP;
} else {
wdev = NULL;
}
if (!vcmd->doit)
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);


@ -15,6 +15,7 @@ static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
{
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb, skb->truesize)) {
XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
@ -49,23 +50,51 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
strp);
struct strp_msg *rxm = strp_msg(skb);
int len = rxm->full_len - 2;
u32 nonesp_marker;
int err;
/* keepalive packet? */
if (unlikely(len == 1)) {
u8 data;
err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
if (err < 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
if (data == 0xff) {
kfree_skb(skb);
return;
}
}
/* drop other short messages */
if (unlikely(len <= sizeof(nonesp_marker))) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
sizeof(nonesp_marker));
if (err < 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
kfree_skb(skb);
return;
}
/* remove header, leave non-ESP marker/SPI */
if (!__pskb_pull(skb, rxm->offset + 2)) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
if (pskb_trim(skb, rxm->full_len - 2) != 0) {
XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
kfree_skb(skb);
return;
}
@ -91,7 +120,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
return err;
len = be16_to_cpu(blen);
if (len < 6)
if (len < 2)
return -EINVAL;
return len;
@ -109,8 +138,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
flags |= nonblock ? MSG_DONTWAIT : 0;
skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
if (!skb)
if (!skb) {
if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
copied = len;
if (copied > skb->len)
@ -213,7 +245,7 @@ retry:
return 0;
}
static int espintcp_push_msgs(struct sock *sk)
static int espintcp_push_msgs(struct sock *sk, int flags)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
struct espintcp_msg *emsg = &ctx->partial;
@ -227,12 +259,12 @@ static int espintcp_push_msgs(struct sock *sk)
ctx->tx_running = 1;
if (emsg->skb)
err = espintcp_sendskb_locked(sk, emsg, 0);
err = espintcp_sendskb_locked(sk, emsg, flags);
else
err = espintcp_sendskmsg_locked(sk, emsg, 0);
err = espintcp_sendskmsg_locked(sk, emsg, flags);
if (err == -EAGAIN) {
ctx->tx_running = 0;
return 0;
return flags & MSG_DONTWAIT ? -EAGAIN : 0;
}
if (!err)
memset(emsg, 0, sizeof(*emsg));
@ -257,7 +289,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
offset = skb_transport_offset(skb);
len = skb->len - offset;
espintcp_push_msgs(sk);
espintcp_push_msgs(sk, 0);
if (emsg->len) {
kfree_skb(skb);
@ -270,7 +302,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
emsg->len = len;
emsg->skb = skb;
espintcp_push_msgs(sk);
espintcp_push_msgs(sk, 0);
return 0;
}
@ -287,7 +319,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
char buf[2] = {0};
int err, end;
if (msg->msg_flags)
if (msg->msg_flags & ~MSG_DONTWAIT)
return -EOPNOTSUPP;
if (size > MAX_ESPINTCP_MSG)
@ -298,9 +330,10 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
err = espintcp_push_msgs(sk);
err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
if (err < 0) {
err = -ENOBUFS;
if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT))
err = -ENOBUFS;
goto unlock;
}
@ -337,10 +370,9 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
tcp_rate_check_app_limited(sk);
err = espintcp_push_msgs(sk);
err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
/* this message could be partially sent, keep it */
if (err < 0)
goto unlock;
release_sock(sk);
return size;
@ -374,7 +406,7 @@ static void espintcp_tx_work(struct work_struct *work)
lock_sock(sk);
if (!ctx->tx_running)
espintcp_push_msgs(sk);
espintcp_push_msgs(sk, 0);
release_sock(sk);
}
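
Several of these hunks implement the record classification for ESP-in-TCP framing (RFC 8229): a 2-byte length prefix, a one-octet 0xff payload treated as a NAT-keepalive to drop, and anything without room for the 4-byte non-ESP marker/SPI rejected as malformed. A userspace sketch of that classification, names illustrative:

#include <stdint.h>
#include <stdio.h>

enum verdict { DROP_KEEPALIVE, DROP_SHORT, DELIVER };

/* rec points at a full record: 2-byte length prefix, then payload. */
static enum verdict classify(const uint8_t *rec, int framelen)
{
        int len = framelen - 2;                 /* payload after the prefix */

        if (len == 1 && rec[2] == 0xff)         /* RFC 8229 NAT-keepalive */
                return DROP_KEEPALIVE;
        if (len <= (int)sizeof(uint32_t))       /* no room for marker/SPI */
                return DROP_SHORT;
        return DELIVER;
}

int main(void)
{
        uint8_t keepalive[] = { 0x00, 0x03, 0xff };
        uint8_t ike[]       = { 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };

        printf("%d %d\n", classify(keepalive, sizeof(keepalive)),
               classify(ike, sizeof(ike)));     /* prints: 0 2 */
        return 0;
}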


@ -39,7 +39,7 @@
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_INET_ESPINTCP
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif
@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
spin_unlock_bh(&pq->hold_queue.lock);
}
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
struct xfrm_policy *pol)
static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
struct xfrm_policy *pol)
{
if (policy->mark.v == pol->mark.v &&
policy->priority == pol->priority)
return true;
return false;
return mark->v == pol->mark.v && mark->m == pol->mark.m;
}
static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(policy, pol) &&
xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
delpol = pol;
@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_policy_mark_match(policy, pol) &&
xfrm_policy_mark_match(&policy->mark, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl)
@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
EXPORT_SYMBOL(xfrm_policy_insert);
static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx)
{
struct xfrm_policy *pol;
@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == type &&
pol->if_id == if_id &&
(mark & pol->mark.m) == pol->mark.v &&
xfrm_policy_mark_match(mark, pol) &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security))
return pol;
@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
return NULL;
}
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err)
struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
u8 type, int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete, int *err)
{
struct xfrm_pol_inexact_bin *bin = NULL;
struct xfrm_policy *pol, *ret = NULL;
@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
u8 type, int dir, u32 id, int delete,
int *err)
struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
u8 type, int dir, u32 id, int delete, int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
if (pol->type == type && pol->index == id &&
pol->if_id == if_id &&
(mark & pol->mark.m) == pol->mark.v) {
pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
xfrm_pol_hold(pol);
if (delete) {
*err = security_xfrm_policy_delete(
@ -4156,7 +4149,7 @@ void __init xfrm_init(void)
seqcount_init(&xfrm_policy_hash_generation);
xfrm_input_init();
#ifdef CONFIG_INET_ESPINTCP
#ifdef CONFIG_XFRM_ESPINTCP
espintcp_init();
#endif


@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
struct km_event c;
int delete;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
u32 if_id = 0;
p = nlmsg_data(nlh);
@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
xfrm_mark_get(attrs, &m);
if (p->index)
xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
ctx, delete, &err);
xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
&p->sel, ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
struct xfrm_mark m;
u32 mark = xfrm_mark_get(attrs, &m);
u32 if_id = 0;
err = copy_from_user_policy_type(&type, attrs);
@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (attrs[XFRMA_IF_ID])
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
xfrm_mark_get(attrs, &m);
if (p->index)
xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
&p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}


@ -5,10 +5,60 @@
#include "test_btf_map_in_map.skel.h"
static int duration;
static __u32 bpf_map_id(struct bpf_map *map)
{
struct bpf_map_info info;
__u32 info_len = sizeof(info);
int err;
memset(&info, 0, info_len);
err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
if (err)
return 0;
return info.id;
}
/*
* Trigger synchronize_rcu() in kernel.
*
* ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
* if looking up an existing non-NULL element or updating the map with a valid
* inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
* create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then
* cleanup. At the end, at least one synchronize_rcu() would be called.
*/
static int kern_sync_rcu(void)
{
int inner_map_fd, outer_map_fd, err, zero = 0;
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
return -1;
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
sizeof(int), inner_map_fd, 1, 0);
if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
close(inner_map_fd);
return -1;
}
err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
if (err)
err = -errno;
CHECK(err, "outer_map_update", "failed %d\n", err);
close(inner_map_fd);
close(outer_map_fd);
return err;
}
void test_btf_map_in_map(void)
{
int duration = 0, err, key = 0, val;
struct test_btf_map_in_map* skel;
int err, key = 0, val, i;
struct test_btf_map_in_map *skel;
int outer_arr_fd, outer_hash_fd;
int fd, map1_fd, map2_fd, map1_id, map2_id;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
map1_fd = bpf_map__fd(skel->maps.inner_map1);
map2_fd = bpf_map__fd(skel->maps.inner_map2);
outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
/* inner1 = input, inner2 = input + 1 */
val = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
val = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
map1_fd = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
map2_fd = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
skel->bss->input = 1;
usleep(1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
/* inner1 = input + 1, inner2 = input */
val = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
val = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
skel->bss->input = 3;
usleep(1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
for (i = 0; i < 5; i++) {
val = i % 2 ? map1_fd : map2_fd;
err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
printf("failed to update hash_of_maps on iter #%d\n", i);
goto cleanup;
}
err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
printf("failed to update hash_of_maps on iter #%d\n", i);
goto cleanup;
}
}
map1_id = bpf_map_id(skel->maps.inner_map1);
map2_id = bpf_map_id(skel->maps.inner_map2);
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
test_btf_map_in_map__destroy(skel);
skel = NULL;
/* we need to either wait for or force synchronize_rcu(), before
* checking for "still exists" condition, otherwise map could still be
* resolvable by ID, causing false positives.
*
* Older kernels (5.8 and earlier) freed map only after two
* synchronize_rcu()s, so trigger two, to be entirely sure.
*/
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
fd = bpf_map_get_fd_by_id(map1_id);
if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
close(fd);
goto cleanup;
}
fd = bpf_map_get_fd_by_id(map2_id);
if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
close(fd);
goto cleanup;
}
cleanup:
test_btf_map_in_map__destroy(skel);
}


@ -318,6 +318,9 @@ class DebugfsDir:
continue
if os.path.isfile(p):
# We need to init trap_flow_action_cookie before reading it
if f == "trap_flow_action_cookie":
cmd('echo deadbeef > %s/%s' % (path, f))
_, out = cmd('cat %s/%s' % (path, f))
dfs[f] = out.strip()
elif os.path.isdir(p):


@ -112,6 +112,7 @@
"perfevent for cgroup sockopt",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,


@ -252,8 +252,6 @@ check_highest_speed_is_chosen()
fi
local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
# Remove the first speed, h1 does not advertise this speed.
unset speeds_arr[0]
max_speed=${speeds_arr[0]}
for current in ${speeds_arr[@]}; do


@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
int fds[2], fds_udp[2][2], ret;
fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
typeflags, PORT_BASE, PORT_BASE + port_off);
typeflags, (uint16_t)PORT_BASE,
(uint16_t)(PORT_BASE + port_off));
fds[0] = sock_fanout_open(typeflags, 0);
fds[1] = sock_fanout_open(typeflags, 0);


@ -329,8 +329,7 @@ int main(int argc, char **argv)
bool all_tests = true;
int arg_index = 0;
int failures = 0;
int s, t;
char opt;
int s, t, opt;
while ((opt = getopt_long(argc, argv, "", long_options,
&arg_index)) != -1) {


@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
if (rbuf[0] != ts->data)
error(1, 0, "payload mismatch. expected %c", ts->data);
if (labs(tstop - texpect) > cfg_variance_us)
if (llabs(tstop - texpect) > cfg_variance_us)
error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
return false;
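
labs() takes a long, which is 32 bits on ILP32 targets, so the int64_t timestamp difference could be truncated before the comparison; llabs() takes long long and keeps the full width. A minimal illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int64_t diff = -5000000000LL;   /* does not fit in a 32-bit long */

        /* On an ILP32 ABI, labs(diff) would first truncate its argument;
         * llabs() preserves the full 64-bit value everywhere.
         */
        printf("llabs(diff) = %" PRId64 "\n", (int64_t)llabs(diff));
        return 0;
}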


@ -344,7 +344,7 @@ int main(int argc, char *argv[])
{
struct sockaddr_storage listenaddr, addr;
unsigned int max_pacing_rate = 0;
size_t total = 0;
uint64_t total = 0;
char *host = NULL;
int fd, c, on = 1;
char *buffer;
@ -473,12 +473,12 @@ int main(int argc, char *argv[])
zflg = 0;
}
while (total < FILE_SZ) {
ssize_t wr = FILE_SZ - total;
int64_t wr = FILE_SZ - total;
if (wr > chunk_size)
wr = chunk_size;
/* Note : we just want to fill the pipe with 0 bytes */
wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
if (wr <= 0)
break;
total += wr;
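
The type changes matter on 32-bit builds, where size_t and ssize_t are 32 bits: a byte counter meant to count up to a multi-gigabyte FILE_SZ would truncate or wrap. A small demonstration of the truncation, assuming an illustrative FILE_SZ:

#include <stdint.h>
#include <stdio.h>

#define FILE_SZ (1ULL << 35)    /* illustrative multi-GB target */

int main(void)
{
        uint32_t as_32bit_size_t = (uint32_t)FILE_SZ;   /* truncates to 0 */
        uint64_t as_uint64 = FILE_SZ;

        printf("64-bit: %llu, truncated 32-bit: %u\n",
               (unsigned long long)as_uint64, as_32bit_size_t);
        return 0;
}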