Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix AF_XDP cq entry leak, from Ilya Maximets.

 2) Fix handling of PHY power-down on RTL8411B, from Heiner Kallweit.

 3) Add some new PCI IDs to iwlwifi, from Ihab Zhaika.

 4) Fix handling of neigh timers wrt. entries added by userspace, from
    Lorenzo Bianconi.

 5) Various cases of missing of_node_put(), from Nishka Dasgupta.

 6) The new NET_ACT_CT needs to depend upon NF_NAT, from Yue Haibing.

 7) Various RDS layer fixes, from Gerd Rausch.

 8) Fix some more fallout from TCQ_F_CAN_BYPASS generalization, from
    Cong Wang.

 9) Fix FIB source validation checks over loopback, also from Cong Wang.

10) Fall back to promiscuous mode when more filters are needed than the
    hardware supports, from Justin Chen.

11) Missing sibling route unlink on failure in ipv6, from Ido Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (90 commits)
  tcp: fix tcp_set_congestion_control() use from bpf hook
  ag71xx: fix return value check in ag71xx_probe()
  ag71xx: fix error return code in ag71xx_probe()
  usb: qmi_wwan: add D-Link DWM-222 A2 device ID
  bnxt_en: Fix VNIC accounting when enabling aRFS on 57500 chips.
  net: dsa: sja1105: Fix missing unlock on error in sk_buff()
  gve: replace kfree with kvfree
  selftests/bpf: fix test_xdp_noinline on s390
  selftests/bpf: fix "valid read map access into a read-only array 1" on s390
  net/mlx5: Replace kfree with kvfree
  MAINTAINERS: update netsec driver
  ipv6: Unlink sibling route in case of failure
  liquidio: Replace vmalloc + memset with vzalloc
  udp: Fix typo in net/ipv4/udp.c
  net: bcmgenet: use promisc for unsupported filters
  ipv6: rt6_check should return NULL if 'from' is NULL
  tipc: initialize 'validated' field of received packets
  selftests: add a test case for rp_filter
  fib: relax source validation check for loopback packets
  mlxsw: spectrum: Do not process learned records with a dummy FID
  ...
commit 5f4fc6d440
Linus Torvalds, 2019-07-19 10:06:06 -07:00
102 changed files with 964 additions and 399 deletions


@ -706,9 +706,9 @@ num_unsol_na
unsolicited IPv6 Neighbor Advertisements) to be issued after a
failover event. As soon as the link is up on the new slave
(possibly immediately) a peer notification is sent on the
bonding device and each VLAN sub-device. This is repeated at
each link monitor interval (arp_interval or miimon, whichever
is active) if the number is greater than 1.
bonding device and each VLAN sub-device. This is repeated at
the rate specified by peer_notif_delay if the number is
greater than 1.
The valid range is 0 - 255; the default value is 1. These options
affect only the active-backup mode. These options were added for
@ -727,6 +727,16 @@ packets_per_slave
The valid range is 0 - 65535; the default value is 1. This option
has effect only in balance-rr mode.
peer_notif_delay
Specify the delay, in milliseconds, between each peer
notification (gratuitous ARP and unsolicited IPv6 Neighbor
Advertisement) when they are issued after a failover event.
This delay should be a multiple of the link monitor interval
(arp_interval or miimon, whichever is active). The default
value is 0 which means to match the value of the link monitor
interval.
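	For illustration, a hypothetical configuration (example values,
	not defaults): loading the driver with

		options bonding mode=active-backup miimon=100 num_unsol_na=3 peer_notif_delay=300

	would, after a failover, send three unsolicited IPv6 Neighbor
	Advertisements spaced 300 ms apart; 300 is a multiple of the
	100 ms link monitor interval, as the text above recommends.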
primary
A string (eth0, eth2, etc) specifying which slave is the


@ -3108,9 +3108,9 @@ S: Maintained
F: arch/riscv/net/
BPF JIT for S390
M: Ilya Leoshkevich <iii@linux.ibm.com>
M: Heiko Carstens <heiko.carstens@de.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
@ -14873,6 +14873,7 @@ F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
SOCIONEXT (SNI) NETSEC NETWORK DRIVER
M: Jassi Brar <jaswinder.singh@linaro.org>
M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/socionext/netsec.c


@ -1379,7 +1379,6 @@ init_tsq(struct idt77252_dev *card)
printk("%s: can't allocate TSQ.\n", card->name);
return -1;
}
memset(card->tsq.base, 0, TSQSIZE);
card->tsq.last = card->tsq.base + TSQ_NUM_ENTRIES - 1;
card->tsq.next = card->tsq.last;


@ -1955,6 +1955,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
if (idx > 15)
return -EIO;
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;


@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, n, &cfhsi_list) {
cfhsi = list_entry(list_node, struct cfhsi, list);
unregister_netdev(cfhsi->ndev);
unregister_netdevice(cfhsi->ndev);
}
rtnl_unlock();
}


@ -35,6 +35,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
/* For our NAPI weight bigger does *NOT* mean better - it means more
* D-cache misses and lots more wasted cycles than we'll ever
@ -1724,17 +1725,19 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
if (!ag->stop_desc)
if (!ag->stop_desc) {
err = -ENOMEM;
goto err_free;
}
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
mac_addr = of_get_mac_address(np);
if (mac_addr)
if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
if (!mac_addr || !is_valid_ether_addr(ndev->dev_addr)) {
if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_random_addr(ndev->dev_addr);
}


@ -1060,8 +1060,6 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
memset(ring_header->desc, 0, ring_header->size);
/* init TPD ring */
tpd_ring->dma = ring_header->dma;
offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;


@ -291,7 +291,6 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
&adapter->ring_dma);
if (!adapter->ring_vir_addr)
return -ENOMEM;
memset(adapter->ring_vir_addr, 0, adapter->ring_size);
/* Init TXD Ring */
adapter->txd_dma = adapter->ring_dma ;


@ -2677,8 +2677,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
mapping = txr->tx_push_mapping +
sizeof(struct tx_push_bd);
txr->data_mapping = cpu_to_le64(mapping);
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
@ -3077,7 +3075,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
if (bp->flags & BNXT_FLAG_RFS)
if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
@ -7188,6 +7186,9 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
if (bp->flags & BNXT_FLAG_CHIP_P5)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
@ -9647,7 +9648,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
vnics = 1;
if (bp->flags & BNXT_FLAG_RFS)
if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
vnics += rx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)


@ -3083,39 +3083,42 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT 16
#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
unsigned char *addr,
int *i,
int *mc)
int *i)
{
u32 reg;
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
UMAC_MDF_ADDR + (*i * 4));
bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
addr[4] << 8 | addr[5],
UMAC_MDF_ADDR + ((*i + 1) * 4));
reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
reg |= (1 << (MAX_MC_COUNT - *mc));
bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
*i += 2;
(*mc)++;
}
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
int i, mc;
int i, nfilter;
u32 reg;
netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
/* Promiscuous mode */
/* Number of filters needed */
nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
/*
* Turn on promiscuous mode for three scenarios
* 1. IFF_PROMISC flag is set
* 2. IFF_ALLMULTI flag is set
* 3. The number of filters needed exceeds the number of filters
* supported by the hardware.
*/
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (dev->flags & IFF_PROMISC) {
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@ -3125,32 +3128,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}
/* UniMac doesn't support ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
netdev_warn(dev, "ALLMULTI is not supported\n");
return;
}
/* update MDF filter */
i = 0;
mc = 0;
/* Broadcast */
bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
/* my own address.*/
bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
/* Unicast list*/
if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
return;
bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
/* Unicast */
netdev_for_each_uc_addr(ha, dev)
bcmgenet_set_mdf_addr(priv, ha->addr, &i);
if (!netdev_uc_empty(dev))
netdev_for_each_uc_addr(ha, dev)
bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
/* Multicast */
if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
return;
netdev_for_each_mc_addr(ha, dev)
bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
bcmgenet_set_mdf_addr(priv, ha->addr, &i);
/* Enable filters */
reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
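As a worked sketch of the new enable mask (the address counts here are
hypothetical, not taken from the patch):

	/* One unicast and two multicast addresses, plus the broadcast
	 * and own-MAC entries, give nfilter = 1 + 2 + 2 = 5, so
	 * GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter)
	 *   == GENMASK(16, 12) == 0x1f000
	 * enables exactly the five MDF slots programmed above.
	 */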
/* Set the hardware MAC address. */


@ -218,15 +218,13 @@ int octeon_setup_iq(struct octeon_device *oct,
return 0;
}
oct->instr_queue[iq_no] =
vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
if (!oct->instr_queue[iq_no])
oct->instr_queue[iq_no] =
vmalloc(sizeof(struct octeon_instr_queue));
vzalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
memset(oct->instr_queue[iq_no], 0,
sizeof(struct octeon_instr_queue));
oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;


@ -207,7 +207,6 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
goto out_err;
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
qe->cntxt_id = qid;
memcpy(&qe->param, p, sizeof(qe->param));


@ -4697,8 +4697,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
if (netif_running(netdev))
if (netif_running(netdev)) {
/* device cannot transmit now, avoid dev_watchdog timeouts */
netif_carrier_off(netdev);
be_close(netdev);
}
be_cancel_worker(adapter);


@ -3144,8 +3144,6 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
memset(cbd_base, 0, bd_size);
/* Get the Ethernet address */
fec_get_mac(ndev);
/* make sure MAC we just acquired is programmed into the hw */


@ -232,7 +232,7 @@ abort_with_mgmt_vector:
abort_with_msix_enabled:
pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
kfree(priv->msix_vectors);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
return err;
}
@ -256,7 +256,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
priv->ntfy_blocks = NULL;
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kfree(priv->msix_vectors);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
}
@ -445,12 +445,12 @@ static int gve_alloc_rings(struct gve_priv *priv)
return 0;
free_rx:
kfree(priv->rx);
kvfree(priv->rx);
priv->rx = NULL;
free_tx_queue:
gve_tx_free_rings(priv);
free_tx:
kfree(priv->tx);
kvfree(priv->tx);
priv->tx = NULL;
return err;
}
@ -500,7 +500,7 @@ static void gve_free_rings(struct gve_priv *priv)
gve_remove_napi(priv, ntfy_idx);
}
gve_tx_free_rings(priv);
kfree(priv->tx);
kvfree(priv->tx);
priv->tx = NULL;
}
if (priv->rx) {
@ -509,7 +509,7 @@ static void gve_free_rings(struct gve_priv *priv)
gve_remove_napi(priv, ntfy_idx);
}
gve_rx_free_rings(priv);
kfree(priv->rx);
kvfree(priv->rx);
priv->rx = NULL;
}
}
@ -592,9 +592,9 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
gve_free_page(&priv->pdev->dev, qpl->pages[i],
qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
kfree(qpl->page_buses);
kvfree(qpl->page_buses);
free_pages:
kfree(qpl->pages);
kvfree(qpl->pages);
priv->num_registered_pages -= qpl->num_entries;
}
@ -635,7 +635,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
free_qpls:
for (j = 0; j <= i; j++)
gve_free_queue_page_list(priv, j);
kfree(priv->qpls);
kvfree(priv->qpls);
return err;
}
@ -644,12 +644,12 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
kfree(priv->qpl_cfg.qpl_id_map);
kvfree(priv->qpl_cfg.qpl_id_map);
for (i = 0; i < num_qpls; i++)
gve_free_queue_page_list(priv, i);
kfree(priv->qpls);
kvfree(priv->qpls);
}
/* Use this to schedule a reset when the device is capable of continuing
@ -1192,7 +1192,6 @@ abort_with_enabled:
pci_disable_device(pdev);
return -ENXIO;
}
EXPORT_SYMBOL(gve_probe);
static void gve_remove(struct pci_dev *pdev)
{


@ -35,7 +35,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
kfree(rx->data.page_info);
kvfree(rx->data.page_info);
slots = rx->data.mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots;
@ -168,7 +168,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
kfree(rx->data.page_info);
kvfree(rx->data.page_info);
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);


@ -582,11 +582,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
/*
* Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
return 0;
err_free_txring:


@ -2558,8 +2558,6 @@ static int skge_up(struct net_device *dev)
goto free_pci_mem;
}
memset(skge->mem, 0, skge->mem_size);
err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
if (err)
goto free_pci_mem;


@ -4917,6 +4917,13 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
},
},
{
.ident = "ASUS P5W DH Deluxe",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
},
},
{}
};


@ -2548,8 +2548,10 @@ static int mtk_probe(struct platform_device *pdev)
continue;
err = mtk_add_mac(eth, mac_np);
if (err)
if (err) {
of_node_put(mac_np);
goto err_deinit_hw;
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {


@ -1013,8 +1013,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
dma_list[i] = t;
eq->page_list[i].map = t;
memset(eq->page_list[i].buf, 0, PAGE_SIZE);
}
eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);


@ -1499,7 +1499,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_NONE;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
~(BIT(FLOW_DISSECTOR_KEY_META) |
BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
@ -1522,11 +1523,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
if (mlx5e_get_tc_tun(filter_dev)) {
if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
@ -2647,6 +2644,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
if (!key.tc_tunnel) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
return -EOPNOTSUPP;
}
hash_key = hash_encap_info(&key);


@ -2450,7 +2450,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
memset(out, 0, outlen);
err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
if (err)
goto free_out;


@ -1134,7 +1134,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
/* create send-to-vport group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
@ -1293,8 +1292,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
return -ENOMEM;
/* create vport rx group */
memset(flow_group_in, 0, inlen);
esw_set_flow_group_source_port(esw, flow_group_in);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);


@ -597,7 +597,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
err = devlink_fmsg_arr_pair_nest_end(fmsg);
free_data:
kfree(cr_data);
kvfree(cr_data);
return err;
}


@ -847,7 +847,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
&mem_item->mapaddr);
if (!mem_item->buf)
return -ENOMEM;
memset(mem_item->buf, 0, mem_item->size);
q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
if (!q->elem_info) {


@ -830,6 +830,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);


@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
if (!have_dscp) {
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_PCP);
if (err)
netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
return err;
}
mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
&dscp_map);
err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
if (!have_dscp) {
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_PCP);
if (err)
netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
return err;
}
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_DSCP);
if (err) {


@ -126,6 +126,16 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
{
enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
struct mlxsw_sp_fid_family *fid_family;
fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
return fid_family->start_index == fid_index;
}
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
{
return fid->fid_family->lag_vid_valid;


@ -2468,6 +2468,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
goto just_remove;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@ -2527,6 +2530,9 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
goto just_remove;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");


@ -291,8 +291,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
if (err)
if (err) {
of_node_put(portnp);
return err;
}
phy_mode = of_get_phy_mode(portnp);
if (phy_mode < 0)
@ -318,6 +320,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
dev_err(ocelot->dev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
return -EINVAL;
}


@ -747,7 +747,6 @@ static int init_shared_mem(struct s2io_nic *nic)
return -ENOMEM;
}
mem_allocated += size;
memset(tmp_v_addr, 0, size);
size = sizeof(struct rxd_info) *
rxd_count[nic->rxd_mode];


@ -442,10 +442,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
goto out_free_rq;
}
memset(rq_addr, 0, rq_size);
prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@ -755,7 +753,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
return -ENOMEM;
}
memset(addr, 0, sizeof(struct netxen_ring_ctx));
recv_ctx->hwctx = addr;
recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
recv_ctx->hwctx->cmd_consumer_offset =


@ -4667,6 +4667,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2);
/* The following Realtek-provided magic fixes an issue with the RX unit
* getting confused after the PHY has been powered down.
*/
r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
mdelay(3);
r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
r8168_mac_ocp_write(tp, 0xF800, 0xE008);
r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
r8168_mac_ocp_write(tp, 0xF808, 0xE027);
r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
r8168_mac_ocp_write(tp, 0xF810, 0xC602);
r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
r8168_mac_ocp_write(tp, 0xF814, 0x0000);
r8168_mac_ocp_write(tp, 0xF816, 0xC502);
r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
r8168_mac_ocp_write(tp, 0xF820, 0x080A);
r8168_mac_ocp_write(tp, 0xF822, 0x6420);
r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
r8168_mac_ocp_write(tp, 0xF828, 0xC516);
r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
r8168_mac_ocp_write(tp, 0xF846, 0xC404);
r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
r8168_mac_ocp_write(tp, 0xF852, 0xE434);
r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
r8168_mac_ocp_write(tp, 0xF860, 0xF007);
r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
r8168_mac_ocp_write(tp, 0xF876, 0xC516);
r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
r8168_mac_ocp_write(tp, 0xF880, 0xC512);
r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
r8168_mac_ocp_write(tp, 0xF888, 0x483F);
r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
r8168_mac_ocp_write(tp, 0xF892, 0xC505);
r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
r8168_mac_ocp_write(tp, 0xF896, 0xC502);
r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
rtl_hw_aspm_clkreq_enable(tp, true);
}


@ -262,7 +262,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
/* check to see if we have sane EEPROM */
signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
if (signature == 0xffff || signature == 0x0000) {
printk (KERN_WARNING "%s: Error EERPOM read %x\n",
printk (KERN_WARNING "%s: Error EEPROM read %x\n",
pci_name(pci_dev), signature);
return 0;
}
@ -359,9 +359,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS962 or SiS963 model, use EEPROM to store MAC address. And EEPROM
* is shared by
* LAN and 1394. When access EEPROM, send EEREQ signal to hardware first
* LAN and 1394. When accessing EEPROM, send EEREQ signal to hardware first
* and wait for EEGNT. If EEGNT is ON, EEPROM is permitted to be accessed
* by LAN, otherwise is not. After MAC address is read from EEPROM, send
* by LAN, otherwise it is not. After MAC address is read from EEPROM, send
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.


@ -2570,7 +2570,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = PTR_ERR(slave_data->ifphy);
dev_err(&pdev->dev,
"%d: Error retrieving port phy: %d\n", i, ret);
return ret;
goto err_node_put;
}
slave_data->slave_node = slave_node;
@ -2589,7 +2589,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
return ret;
goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
@ -2607,7 +2607,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
of_node_put(mdio_node);
if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
return -EINVAL;
ret = -EINVAL;
goto err_node_put;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
@ -2622,7 +2623,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
return slave_data->phy_if;
ret = slave_data->phy_if;
goto err_node_put;
}
no_phy_slave:
@ -2633,7 +2635,7 @@ no_phy_slave:
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
if (ret)
return ret;
goto err_node_put;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@ -2648,11 +2650,17 @@ no_phy_slave:
}
i++;
if (i == data->slaves)
break;
if (i == data->slaves) {
ret = 0;
goto err_node_put;
}
}
return 0;
err_node_put:
of_node_put(slave_node);
return ret;
}
static void cpsw_remove_dt(struct platform_device *pdev)
@ -2675,8 +2683,10 @@ static void cpsw_remove_dt(struct platform_device *pdev)
of_node_put(slave_data->phy_node);
i++;
if (i == data->slaves)
if (i == data->slaves) {
of_node_put(slave_node);
break;
}
}
of_platform_depopulate(&pdev->dev);


@ -855,7 +855,6 @@ static int tlan_init(struct net_device *dev)
dev->name);
return -ENOMEM;
}
memset(priv->dma_storage, 0, dma_size);
priv->rx_list = (struct tlan_list *)
ALIGN((unsigned long)priv->dma_storage, 8);
priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);


@ -1196,7 +1196,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));
rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
&dma_addr);
@ -1205,7 +1204,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->info_dma = dma_addr;
memset(rrpriv->info, 0, sizeof(struct rr_info));
wmb();
spin_lock_irqsave(&rrpriv->lock, flags);


@ -1292,6 +1292,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */


@ -3430,7 +3430,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_ver;
}
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
}


@ -7541,6 +7541,8 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
&vht_nss,
true);
update_bitrate_mask = false;
} else {
vht_pfr = 0;
}
mutex_lock(&ar->conf_mutex);


@ -80,7 +80,9 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
@ -109,6 +111,8 @@
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@ -256,6 +260,30 @@ const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
@ -372,6 +400,30 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9461",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9462",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
@ -590,6 +642,7 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));


@ -565,10 +565,13 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
@ -580,6 +583,10 @@ extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0;
extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_jf;


@ -328,6 +328,8 @@ enum {
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
#define CSR_HW_REV_TYPE_QU_B0 (0x0000334)
#define CSR_HW_REV_TYPE_QU_C0 (0x0000338)
#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)


@ -604,6 +604,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
@ -971,6 +972,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
@ -1037,6 +1039,27 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
iwl_trans->cfg = cfg;
}
/*
* This is a hack to switch from Qu B0 to Qu C0. We need to
* do this for all cfgs that use Qu B0. All this code is in
* urgent need of a refactor, but for now this is the easiest
* thing to do to support Qu C-step.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
}
#endif
pci_set_drvdata(pdev, iwl_trans);


@ -372,14 +372,9 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
/*
* Report the frame as DMA done
*/
rt2x00lib_dmadone(entry);
/*
* Check if the received data is simply too small
* to be actually valid, or if the urb is signaling
@ -388,6 +383,11 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
if (urb->actual_length < entry->queue->desc_size || urb->status)
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
/*
* Report the frame as DMA done
*/
rt2x00lib_dmadone(entry);
/*
* Schedule the delayed work for reading the RX status
* from the device.


@ -747,7 +747,7 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_ctx_wide_store_ok(off, size, type, field) \
#define bpf_ctx_wide_access_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
off + sizeof(__u64) <= offsetofend(type, field) && \
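A minimal sketch of how the renamed check behaves (hypothetical calls;
the hunk above is truncated mid-macro, its remaining condition requires
8-byte alignment of the offset):

	bpf_ctx_wide_access_ok(offsetof(struct bpf_sock_addr, user_ip6),
			       sizeof(__u64), struct bpf_sock_addr,
			       user_ip6);	/* true: aligned 8 bytes inside the field */
	bpf_ctx_wide_access_ok(offsetof(struct bpf_sock_addr, user_ip6) + 12,
			       sizeof(__u64), struct bpf_sock_addr,
			       user_ip6);	/* false: would run past offsetofend() */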


@ -1064,7 +1064,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);


@ -3248,7 +3248,7 @@ struct bpf_sock_addr {
__u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__u32 user_port; /* Allows 4-byte read and write.
@ -3260,7 +3260,7 @@ struct bpf_sock_addr {
__u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
__u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__bpf_md_ptr(struct bpf_sock *, sk);


@ -1174,7 +1174,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* s32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
__TCA_TAPRIO_ATTR_MAX,
};


@ -1073,11 +1073,18 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
!btf_type_is_var(size_type)))
return NULL;
size = btf->resolved_sizes[size_type_id];
size_type_id = btf->resolved_ids[size_type_id];
size_type = btf_type_by_id(btf, size_type_id);
if (btf_type_nosize_or_null(size_type))
return NULL;
else if (btf_type_has_size(size_type))
size = size_type->size;
else if (btf_type_is_array(size_type))
size = btf->resolved_sizes[size_type_id];
else if (btf_type_is_ptr(size_type))
size = sizeof(void *);
else
return NULL;
}
*type_id = size_type_id;
@ -1602,7 +1609,6 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
const struct btf_type *next_type;
u32 next_type_id = t->type;
struct btf *btf = env->btf;
u32 next_type_size = 0;
next_type = btf_type_by_id(btf, next_type_id);
if (!next_type || btf_type_is_resolve_source_only(next_type)) {
@ -1620,7 +1626,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
* save us a few type-following when we use it later (e.g. in
* pretty print).
*/
if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
if (!btf_type_id_size(btf, &next_type_id, NULL)) {
if (env_type_is_resolved(env, next_type_id))
next_type = btf_type_id_resolve(btf, &next_type_id);
@ -1633,7 +1639,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
}
}
env_stack_pop_resolved(env, next_type_id, next_type_size);
env_stack_pop_resolved(env, next_type_id, 0);
return 0;
}
@ -1645,7 +1651,6 @@ static int btf_var_resolve(struct btf_verifier_env *env,
const struct btf_type *t = v->t;
u32 next_type_id = t->type;
struct btf *btf = env->btf;
u32 next_type_size;
next_type = btf_type_by_id(btf, next_type_id);
if (!next_type || btf_type_is_resolve_source_only(next_type)) {
@ -1675,12 +1680,12 @@ static int btf_var_resolve(struct btf_verifier_env *env,
* forward types or similar that would resolve to size of
* zero is allowed.
*/
if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
if (!btf_type_id_size(btf, &next_type_id, NULL)) {
btf_verifier_log_type(env, v->t, "Invalid type_id");
return -EINVAL;
}
env_stack_pop_resolved(env, next_type_id, next_type_size);
env_stack_pop_resolved(env, next_type_id, 0);
return 0;
}


@ -1519,9 +1519,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
return -EFAULT;
}
*stack_mask |= 1ull << spi;
} else if (class == BPF_STX) {
} else if (class == BPF_STX || class == BPF_ST) {
if (*reg_mask & dreg)
/* stx shouldn't be using _scalar_ dst_reg
/* stx & st shouldn't be using _scalar_ dst_reg
* to access memory. It means backtracking
* encountered a case of pointer subtraction.
*/
@ -1540,7 +1540,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
if (!(*stack_mask & (1ull << spi)))
return 0;
*stack_mask &= ~(1ull << spi);
*reg_mask |= sreg;
if (class == BPF_STX)
*reg_mask |= sreg;
} else if (class == BPF_JMP || class == BPF_JMP32) {
if (opcode == BPF_CALL) {
if (insn->src_reg == BPF_PSEUDO_CALL)
@ -1569,10 +1570,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
if (mode == BPF_IND || mode == BPF_ABS)
/* to be analyzed */
return -ENOTSUPP;
} else if (class == BPF_ST) {
if (*reg_mask & dreg)
/* likely pointer subtraction */
return -ENOTSUPP;
}
return 0;
}
@ -6106,11 +6103,13 @@ static int check_return_code(struct bpf_verifier_env *env)
if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
range = tnum_range(1, 1);
break;
case BPF_PROG_TYPE_CGROUP_SKB:
if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
range = tnum_range(0, 3);
enforce_attach_type_range = tnum_range(2, 3);
}
break;
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:


@ -4335,7 +4335,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
ret = tcp_set_congestion_control(sk, name, false,
reinit);
reinit, true);
} else {
struct tcp_sock *tp = tcp_sk(sk);
@ -6884,20 +6884,30 @@ static bool sock_addr_is_valid_access(int off, int size,
case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
msg_src_ip6[3]):
/* Only narrow read access allowed for now. */
if (type == BPF_READ) {
bpf_ctx_record_field_size(info, size_default);
if (bpf_ctx_wide_access_ok(off, size,
struct bpf_sock_addr,
user_ip6))
return true;
if (bpf_ctx_wide_access_ok(off, size,
struct bpf_sock_addr,
msg_src_ip6))
return true;
if (!bpf_ctx_narrow_access_ok(off, size, size_default))
return false;
} else {
if (bpf_ctx_wide_store_ok(off, size,
struct bpf_sock_addr,
user_ip6))
if (bpf_ctx_wide_access_ok(off, size,
struct bpf_sock_addr,
user_ip6))
return true;
if (bpf_ctx_wide_store_ok(off, size,
struct bpf_sock_addr,
msg_src_ip6))
if (bpf_ctx_wide_access_ok(off, size,
struct bpf_sock_addr,
msg_src_ip6))
return true;
if (size != size_default)


@ -1124,6 +1124,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
atomic_set(&neigh->probes,
NEIGH_VAR(neigh->parms, UCAST_PROBES));
neigh_del_timer(neigh);
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
@ -1140,6 +1141,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
}
} else if (neigh->nud_state & NUD_STALE) {
neigh_dbg(2, "neigh %p is delayed\n", neigh);
neigh_del_timer(neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_add_timer(neigh, jiffies +


@ -762,7 +762,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
printk("%sdev name=%s feat=0x%pNF\n",
level, dev->name, &dev->features);
if (sk)
printk("%ssk family=%hu type=%hu proto=%hu\n",
printk("%ssk family=%hu type=%u proto=%u\n",
level, sk->sk_family, sk->sk_type, sk->sk_protocol);
if (full_pkt && headroom)


@ -216,6 +216,7 @@ static struct sk_buff
if (!skb) {
dev_err_ratelimited(dp->ds->dev,
"Failed to copy stampable skb\n");
spin_unlock(&sp->data->meta_lock);
return NULL;
}
sja1105_transfer_meta(skb, meta);


@ -388,6 +388,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fib_combine_itag(itag, &res);
dev_match = fib_info_nh_uses_dev(res.fi, dev);
/* This is not common, loopback packets retain skb_dst so normally they
* would not even hit this slow path.
*/
dev_match = dev_match || (res.type == RTN_LOCAL &&
dev == net->loopback_dev);
if (dev_match) {
ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
return ret;


@ -2785,7 +2785,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
name[val] = 0;
lock_sock(sk);
err = tcp_set_congestion_control(sk, name, true, true);
err = tcp_set_congestion_control(sk, name, true, true,
ns_capable(sock_net(sk)->user_ns,
CAP_NET_ADMIN));
release_sock(sk);
return err;
}


@ -333,7 +333,8 @@ out:
* tcp_reinit_congestion_control (if the current congestion control was
* already initialized).
*/
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
bool reinit, bool cap_net_admin)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
@ -369,8 +370,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
} else {
err = -EBUSY;
}
} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
err = -EPERM;
} else if (!try_module_get(ca->owner)) {
err = -EBUSY;


@ -2170,7 +2170,7 @@ start_lookup:
/* Initialize UDP checksum. If exited with zero value (success),
* CHECKSUM_UNNECESSARY means, that no more checks are required.
* Otherwise, csum completion requires chacksumming packet body,
* Otherwise, csum completion requires checksumming packet body,
* including udp header and folding it to skb->csum.
*/
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,


@ -1151,8 +1151,24 @@ add:
err = call_fib6_entry_notifiers(info->nl_net,
FIB_EVENT_ENTRY_ADD,
rt, extack);
if (err)
if (err) {
struct fib6_info *sibling, *next_sibling;
/* If the route has siblings, then it first
* needs to be unlinked from them.
*/
if (!rt->fib6_nsiblings)
return err;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings,
fib6_siblings)
sibling->fib6_nsiblings--;
rt->fib6_nsiblings = 0;
list_del_init(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
return err;
}
}
rcu_assign_pointer(rt->fib6_next, iter);


@ -2563,7 +2563,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
{
u32 rt_cookie = 0;
if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
rt_cookie != cookie)
return NULL;


@ -900,12 +900,17 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPV6,
0, dst, tiph->saddr, 0, 0,
sock_net_uid(tunnel->net, NULL));
rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
if (!rt) {
rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
if (IS_ERR(rt)) {
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
}
if (rt->rt_type != RTN_UNICAST) {
ip_rt_put(rt);
dev->stats.tx_carrier_errors++;


@ -156,6 +156,7 @@ struct rds_ib_connection {
/* To control the number of wrs from fastreg */
atomic_t i_fastreg_wrs;
atomic_t i_fastreg_inuse_count;
/* interrupt handling */
struct tasklet_struct i_send_tasklet;


@ -40,6 +40,7 @@
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
#include "ib_mr.h"
/*
* Set the selected protocol version
@ -526,7 +527,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
@ -993,6 +993,11 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_cm_id, err);
}
/* kick off "flush_worker" for all pools in order to reap
* all FRMR registrations that are still marked "FRMR_IS_INUSE"
*/
rds_ib_flush_mrs();
/*
* We want to wait for tx and rx completion to finish
* before we tear down the connection, but we have to be
@ -1005,6 +1010,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
wait_event(rds_ib_ring_empty_wait,
rds_ib_ring_empty(&ic->i_recv_ring) &&
(atomic_read(&ic->i_signaled_sends) == 0) &&
(atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
(atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
tasklet_kill(&ic->i_send_tasklet);
tasklet_kill(&ic->i_recv_tasklet);
@ -1132,6 +1138,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
spin_lock_init(&ic->i_ack_lock);
#endif
atomic_set(&ic->i_signaled_sends, 0);
atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they


@ -32,6 +32,24 @@
#include "ib_mr.h"
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
enum rds_ib_fr_state old_state,
enum rds_ib_fr_state new_state)
{
if (cmpxchg(&ibmr->u.frmr.fr_state,
old_state, new_state) == old_state &&
old_state == FRMR_IS_INUSE) {
/* enforce order of ibmr->u.frmr.fr_state update
* before decrementing i_fastreg_inuse_count
*/
smp_mb__before_atomic();
atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
if (waitqueue_active(&rds_ib_ring_empty_wait))
wake_up(&rds_ib_ring_empty_wait);
}
}
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
int npages)
{
@ -75,6 +93,8 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
pool->max_items_soft = pool->max_items;
frmr->fr_state = FRMR_IS_FREE;
init_waitqueue_head(&frmr->fr_inv_done);
init_waitqueue_head(&frmr->fr_reg_done);
return ibmr;
out_no_cigar:
@ -116,13 +136,19 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
if (unlikely(ret != ibmr->sg_len))
return ret < 0 ? ret : -EINVAL;
if (cmpxchg(&frmr->fr_state,
FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
return -EBUSY;
atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
/* Perform a WR for the fast_reg_mr. Each individual page
* in the sg list is added to the fast reg page list and placed
* inside the fast_reg_mr WR. The key used is a rolling 8bit
* counter, which should guarantee uniqueness.
*/
ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
frmr->fr_state = FRMR_IS_INUSE;
frmr->fr_reg = true;
memset(&reg_wr, 0, sizeof(reg_wr));
reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
@ -138,12 +164,23 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
if (unlikely(ret)) {
/* Failure here can be because of -ENOMEM as well */
frmr->fr_state = FRMR_IS_STALE;
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
goto out;
}
/* Wait for the registration to complete in order to prevent an invalid
* access error caused by the memory region being accessed while its
* registration is still pending.
*/
wait_event(frmr->fr_reg_done, !frmr->fr_reg);
out:
return ret;
}
@ -255,12 +292,29 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
if (unlikely(ret)) {
frmr->fr_state = FRMR_IS_STALE;
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
frmr->fr_inv = false;
/* enforce order of frmr->fr_inv update
* before incrementing i_fastreg_wrs
*/
smp_mb__before_atomic();
atomic_inc(&ibmr->ic->i_fastreg_wrs);
pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
goto out;
}
/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
* 1) avoid a silly bouncing between "clean_list" and "drop_list"
* triggered by function "rds_ib_reg_frmr", as it releases frmr
* regions whose state is not "FRMR_IS_FREE" right away.
* 2) prevent an invalid access error in a race between a pending
* "IB_WR_LOCAL_INV" operation and the teardown ("dma_unmap_sg",
* "put_page") and de-registration ("ib_dereg_mr") of the
* corresponding memory region.
*/
wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
out:
return ret;
}
@ -271,7 +325,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
if (wc->status != IB_WC_SUCCESS) {
frmr->fr_state = FRMR_IS_STALE;
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
if (rds_conn_up(ic->conn))
rds_ib_conn_error(ic->conn,
"frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
@ -283,10 +337,20 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
}
if (frmr->fr_inv) {
frmr->fr_state = FRMR_IS_FREE;
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
frmr->fr_inv = false;
wake_up(&frmr->fr_inv_done);
}
if (frmr->fr_reg) {
frmr->fr_reg = false;
wake_up(&frmr->fr_reg_done);
}
/* enforce order of frmr->{fr_reg,fr_inv} update
* before incrementing i_fastreg_wrs
*/
smp_mb__before_atomic();
atomic_inc(&ic->i_fastreg_wrs);
}
@ -295,14 +359,18 @@ void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
{
struct rds_ib_mr *ibmr, *next;
struct rds_ib_frmr *frmr;
int ret = 0;
int ret = 0, ret2;
unsigned int freed = *nfreed;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
list_for_each_entry(ibmr, list, unmap_list) {
if (ibmr->sg_dma_len)
ret |= rds_ib_post_inv(ibmr);
if (ibmr->sg_dma_len) {
ret2 = rds_ib_post_inv(ibmr);
if (ret2 && !ret)
ret = ret2;
}
}
if (ret)
pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

View File

@ -57,6 +57,9 @@ struct rds_ib_frmr {
struct ib_mr *mr;
enum rds_ib_fr_state fr_state;
bool fr_inv;
wait_queue_head_t fr_inv_done;
bool fr_reg;
wait_queue_head_t fr_reg_done;
struct ib_send_wr fr_wr;
unsigned int dma_npages;
unsigned int sg_byte_len;
@ -97,6 +100,7 @@ struct rds_ib_mr_pool {
struct llist_head free_list; /* unused MRs */
struct llist_head clean_list; /* unused & unmapped MRs */
wait_queue_head_t flush_wait;
spinlock_t clean_lock; /* "clean_list" concurrency */
atomic_t free_pinned; /* memory pinned by free MRs */
unsigned long max_items;

View File

@ -40,9 +40,6 @@
struct workqueue_struct *rds_ib_mr_wq;
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
@ -195,12 +192,11 @@ struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
struct llist_node *ret;
unsigned long *flag;
unsigned long flags;
preempt_disable();
flag = this_cpu_ptr(&clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
spin_lock_irqsave(&pool->clean_lock, flags);
ret = llist_del_first(&pool->clean_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
if (ret) {
ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
if (pool->pool_type == RDS_IB_MR_8K_POOL)
@ -209,23 +205,9 @@ struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
}
clear_bit(CLEAN_LIST_BUSY_BIT, flag);
preempt_enable();
return ibmr;
}
static inline void wait_clean_list_grace(void)
{
int cpu;
unsigned long *flag;
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
cpu_relax();
}
}
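
The per-CPU grace bit that wait_clean_list_grace() used to spin on is gone; pool->clean_lock now simply serializes every llist_del_first() against the batched re-insertions further down, so a node can never be re-added while another consumer is still dereferencing it mid-pop. A stripped-down userspace analogue of the locked single-consumer pop (illustrative types and names):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_spinlock_t clean_lock;
static struct node *clean_list;

static void pool_init(void)
{
	pthread_spin_init(&clean_lock, PTHREAD_PROCESS_PRIVATE);
}

/* Every popper takes the lock, closing the remove/re-add race the
 * old grace period guarded against. */
static struct node *pop_clean(void)
{
	struct node *n;

	pthread_spin_lock(&clean_lock);
	n = clean_list;
	if (n)
		clean_list = n->next;
	pthread_spin_unlock(&clean_lock);
	return n;
}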
void rds_ib_sync_mr(void *trans_private, int direction)
{
struct rds_ib_mr *ibmr = trans_private;
@ -324,8 +306,7 @@ static unsigned int llist_append_to_list(struct llist_head *llist,
* of clusters. Each cluster has linked llist nodes of
* MR_CLUSTER_SIZE mrs that are ready for reuse.
*/
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
struct list_head *list,
static void list_to_llist_nodes(struct list_head *list,
struct llist_node **nodes_head,
struct llist_node **nodes_tail)
{
@ -402,8 +383,13 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
*/
dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
if (free_all) {
unsigned long flags;
spin_lock_irqsave(&pool->clean_lock, flags);
llist_append_to_list(&pool->clean_list, &unmap_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
}
free_goal = rds_ib_flush_goal(pool, free_all);
@ -416,27 +402,20 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
/* we have to make sure that none of the things we're about
* to put on the clean list would race with other cpus trying
* to pull items off. The llist would explode if we managed to
* remove something from the clean list and then add it back again
* while another CPU was spinning on that same item in llist_del_first.
*
* This is pretty unlikely, but just in case wait for an llist grace period
* here before adding anything back into the clean list.
*/
wait_clean_list_grace();
unsigned long flags;
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
clean_nodes = clean_nodes->next;
}
/* more than one entry in llist nodes */
if (clean_nodes)
if (clean_nodes) {
spin_lock_irqsave(&pool->clean_lock, flags);
llist_add_batch(clean_nodes, clean_tail,
&pool->clean_list);
spin_unlock_irqrestore(&pool->clean_lock, flags);
}
}
atomic_sub(unpinned, &pool->free_pinned);
@ -471,7 +450,7 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
return ERR_PTR(-EAGAIN);
break;
}
/* We do have some empty MRs. Flush them out. */
@ -485,7 +464,7 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
return ibmr;
}
return ibmr;
return NULL;
}
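
With this hunk rds_ib_try_reuse_ibmr() returns plain NULL on pool depletion instead of ERR_PTR(-EAGAIN), so callers no longer need IS_ERR() checks. For reference, a simplified sketch of the <linux/err.h> convention the old return value relied on (not a verbatim copy of the kernel headers):

#define MAX_ERRNO 4095

/* Small negative errno values live in the top, never-mapped page of
 * the address space, so they can be encoded in a pointer. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}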
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
@ -610,6 +589,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
init_llist_head(&pool->free_list);
init_llist_head(&pool->drop_list);
init_llist_head(&pool->clean_list);
spin_lock_init(&pool->clean_lock);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

View File

@ -942,7 +942,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
depends on NET_CLS_ACT && NF_CONNTRACK
depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT
help
Say Y here to allow sending packets to the conntrack module.

View File

@ -2152,6 +2152,7 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held);
tfilter_put(tp, fh);
q->flags &= ~TCQ_F_CAN_BYPASS;
}
errout:

View File

@ -596,8 +596,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
/* we cannot bypass queue discipline anymore */
sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}

View File

@ -824,8 +824,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
/* we cannot bypass queue discipline anymore */
sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}

View File

@ -75,7 +75,7 @@ struct taprio_sched {
struct sched_gate_list __rcu *admin_sched;
struct hrtimer advance_timer;
struct list_head taprio_list;
int txtime_delay;
u32 txtime_delay;
};
static ktime_t sched_base_time(const struct sched_gate_list *sched)
@ -1113,7 +1113,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto unlock;
}
q->txtime_delay = nla_get_s32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
}
if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
@ -1430,7 +1430,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
goto options_error;
if (q->txtime_delay &&
nla_put_s32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
if (oper && dump_schedule(skb, oper))

View File

@ -2582,8 +2582,7 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
if (asoc->peer.cookie)
kfree(asoc->peer.cookie);
kfree(asoc->peer.cookie);
asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
retval = 0;
@ -2648,8 +2647,7 @@ do_addr_param:
goto fall_through;
/* Save peer's random parameter */
if (asoc->peer.peer_random)
kfree(asoc->peer.peer_random);
kfree(asoc->peer.peer_random);
asoc->peer.peer_random = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_random) {
@ -2663,8 +2661,7 @@ do_addr_param:
goto fall_through;
/* Save peer's HMAC list */
if (asoc->peer.peer_hmacs)
kfree(asoc->peer.peer_hmacs);
kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_hmacs) {
@ -2680,8 +2677,7 @@ do_addr_param:
if (!ep->auth_enable)
goto fall_through;
if (asoc->peer.peer_chunks)
kfree(asoc->peer.peer_chunks);
kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_chunks)

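The SCTP hunks above drop the `if (ptr)` guards because kfree(NULL), like free(NULL), is defined to be a no-op. A minimal illustration of the replace-a-buffer idiom this enables (hypothetical helper):

#include <stdlib.h>
#include <string.h>

/* free(NULL) -- and kfree(NULL) in the kernel -- does nothing, so no
 * NULL check is needed before releasing the previous buffer. */
static void replace_buf(char **slot, const char *src, size_t len)
{
	free(*slot);               /* safe even when *slot is NULL */
	*slot = malloc(len);
	if (*slot)
		memcpy(*slot, src, len);
}
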
View File

@ -1807,6 +1807,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
__skb_queue_head_init(&xmitq);
/* Ensure message is well-formed before touching the header */
TIPC_SKB_CB(skb)->validated = false;
if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
hdr = buf_msg(skb);

View File

@ -87,21 +87,20 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
struct netdev_bpf bpf;
int err = 0;
ASSERT_RTNL();
force_zc = flags & XDP_ZEROCOPY;
force_copy = flags & XDP_COPY;
if (force_zc && force_copy)
return -EINVAL;
rtnl_lock();
if (xdp_get_umem_from_qid(dev, queue_id)) {
err = -EBUSY;
goto out_rtnl_unlock;
}
if (xdp_get_umem_from_qid(dev, queue_id))
return -EBUSY;
err = xdp_reg_umem_at_qid(dev, umem, queue_id);
if (err)
goto out_rtnl_unlock;
return err;
umem->dev = dev;
umem->queue_id = queue_id;
@ -110,7 +109,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_copy)
/* For copy-mode, we are done. */
goto out_rtnl_unlock;
return 0;
if (!dev->netdev_ops->ndo_bpf ||
!dev->netdev_ops->ndo_xsk_async_xmit) {
@ -125,7 +124,6 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
if (err)
goto err_unreg_umem;
rtnl_unlock();
umem->zc = true;
return 0;
@ -135,8 +133,6 @@ err_unreg_umem:
err = 0; /* fallback to copy mode */
if (err)
xdp_clear_umem_at_qid(dev, queue_id);
out_rtnl_unlock:
rtnl_unlock();
return err;
}

View File

@ -240,6 +240,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
mutex_lock(&xs->mutex);
if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
while (xskq_peek_desc(xs->tx, &desc)) {
char *buffer;
u64 addr;
@ -250,12 +253,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
goto out;
}
if (xskq_reserve_addr(xs->umem->cq))
goto out;
if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
len = desc.len;
skb = sock_alloc_send_skb(sk, len, 1, &err);
if (unlikely(!skb)) {
@ -267,7 +264,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
addr = desc.addr;
buffer = xdp_umem_get_data(xs->umem, addr);
err = skb_store_bits(skb, 0, buffer, len);
if (unlikely(err)) {
if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
kfree_skb(skb);
goto out;
}
@ -433,6 +430,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
return -EINVAL;
rtnl_lock();
mutex_lock(&xs->mutex);
if (xs->state != XSK_READY) {
err = -EBUSY;
@ -518,6 +516,7 @@ out_unlock:
xs->state = XSK_BOUND;
out_release:
mutex_unlock(&xs->mutex);
rtnl_unlock();
return err;
}
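
xsk_bind() now takes rtnl_lock() before xs->mutex because xdp_umem_assign_dev() (previous file) runs with ASSERT_RTNL() and expects the caller to hold it; hoisting the outer lock into the caller keeps one consistent acquisition order across all paths. A toy sketch of the caller-holds-lock contract, with hypothetical names:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~ rtnl_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* ~ xs->mutex */
static bool outer_held;                                   /* ~ ASSERT_RTNL() */

static void assign_dev(void)
{
	assert(outer_held);       /* callee only asserts the precondition */
	/* ... device/queue registration ... */
}

static void bind_socket(void)
{
	pthread_mutex_lock(&outer);  /* always outer first ... */
	outer_held = true;
	pthread_mutex_lock(&inner);  /* ... then inner, in every path */
	assign_dev();
	pthread_mutex_unlock(&inner);
	outer_held = false;
	pthread_mutex_unlock(&outer);
}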

View File

@ -284,7 +284,7 @@ $(obj)/%.o: $(src)/%.c
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
-I$(srctree)/tools/testing/selftests/bpf/ \
-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \

View File

@ -74,6 +74,7 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
};

View File

@ -3245,7 +3245,7 @@ struct bpf_sock_addr {
__u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__u32 user_port; /* Allows 4-byte read and write.
@ -3257,7 +3257,7 @@ struct bpf_sock_addr {
__u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
__u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__bpf_md_ptr(struct bpf_sock *, sk);

View File

@ -4126,8 +4126,8 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
}
attr.size = sizeof(attr);
attr.type = type;
attr.config1 = (uint64_t)(void *)name; /* kprobe_func or uprobe_path */
attr.config2 = offset; /* kprobe_addr or probe_offset */
attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
attr.config2 = offset; /* kprobe_addr or probe_offset */
/* pid filter is meaningful only for uprobes */
pfd = syscall(__NR_perf_event_open, &attr,

View File

@ -517,7 +517,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
err = -errno;
goto out_socket;
}
strncpy(xsk->ifname, ifname, IFNAMSIZ);
strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
xsk->ifname[IFNAMSIZ - 1] = '\0';
err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
if (err)

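This is the canonical strncpy() pitfall: with a count equal to the full buffer size, the destination is left without a NUL terminator whenever strlen(src) >= size. The fixed idiom copies one byte less and terminates by hand:

#include <string.h>

#define IFNAMSIZ 16

static void copy_ifname(char dst[IFNAMSIZ], const char *src)
{
	/* strncpy() NUL-pads only when src is shorter than the count;
	 * for strlen(src) >= IFNAMSIZ - 1 the last byte must be set
	 * explicitly. */
	strncpy(dst, src, IFNAMSIZ - 1);
	dst[IFNAMSIZ - 1] = '\0';
}
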
View File

@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
include ../../../../scripts/Kbuild.include
include ../../../scripts/Makefile.arch
LIBDIR := ../../../lib
BPFDIR := $(LIBDIR)/bpf
@ -81,13 +83,14 @@ all: $(TEST_CUSTOM_PROGS)
$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
$(CC) -o $@ $< -Wl,--build-id
$(OUTPUT)/test_maps: map_tests/*.c
$(OUTPUT)/test_stub.o: test_stub.c
$(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) -c -o $@ $<
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): test_stub.o $(BPFOBJ)
$(TEST_GEN_PROGS): $(OUTPUT)/test_stub.o $(BPFOBJ)
$(TEST_GEN_PROGS_EXTENDED): test_stub.o $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(OUTPUT)/libbpf.a
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
@ -138,7 +141,8 @@ CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
$(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types
-Wno-compare-distinct-pointer-types \
-D__TARGET_ARCH_$(SRCARCH)
$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
@ -172,6 +176,7 @@ endif
endif
TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
ifneq ($(SUBREG_CODEGEN),)
@ -180,12 +185,12 @@ TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
$(ALU32_BUILD_DIR):
mkdir -p $@
$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(ALU32_BUILD_DIR)
cp $< $@
$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
$(ALU32_BUILD_DIR) \
$(ALU32_BUILD_DIR)/urandom_read
$(ALU32_BUILD_DIR)/urandom_read \
| $(ALU32_BUILD_DIR)
$(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
-o $(ALU32_BUILD_DIR)/test_progs_32 \
test_progs.c test_stub.c trace_helpers.c prog_tests/*.c \
@ -194,10 +199,10 @@ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
$(ALU32_BUILD_DIR)/test_progs_32
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR)/test_progs_32 \
| $(ALU32_BUILD_DIR)
($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
echo "clang failed") | \
$(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
-filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
@ -208,32 +213,30 @@ endif
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
$(OUTPUT)/test_xdp.o: progs/test_xdp.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -emit-llvm -c $< -o - | \
($(CLANG) $(CLANG_FLAGS) -O2 -emit-llvm -c $< -o - || \
echo "clang failed") | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
$(OUTPUT)/%.o: progs/%.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
echo "clang failed") | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
test_progs.c: $(PROG_TESTS_H)
$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
$(OUTPUT)/test_progs: prog_tests/*.c
PROG_TESTS_DIR = $(OUTPUT)/prog_tests
$(PROG_TESTS_DIR):
mkdir -p $@
PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.h
PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
$(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
test_progs.c: $(PROG_TESTS_H)
$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_H) $(PROG_TESTS_FILES)
$(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR)
$(shell ( cd prog_tests/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef DECLARE'; \
@ -246,15 +249,15 @@ $(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
echo '#endif' \
) > $(PROG_TESTS_H))
TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
MAP_TESTS_DIR = $(OUTPUT)/map_tests
$(MAP_TESTS_DIR):
mkdir -p $@
MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h
MAP_TESTS_FILES := $(wildcard map_tests/*.c)
test_maps.c: $(MAP_TESTS_H)
$(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS)
MAP_TESTS_FILES := $(wildcard map_tests/*.c)
$(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
$(OUTPUT)/test_maps: test_maps.c $(MAP_TESTS_H) $(MAP_TESTS_FILES)
$(MAP_TESTS_H): $(MAP_TESTS_FILES) | $(MAP_TESTS_DIR)
$(shell ( cd map_tests/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef DECLARE'; \
@ -267,16 +270,15 @@ $(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
echo '#endif' \
) > $(MAP_TESTS_H))
VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
test_verifier.c: $(VERIFIER_TESTS_H)
$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
$(VERIFIER_TESTS_DIR):
mkdir -p $@
VERIFIER_TESTS_H := $(VERIFIER_TESTS_DIR)/tests.h
VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
$(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
test_verifier.c: $(VERIFIER_TESTS_H)
$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
$(OUTPUT)/test_verifier: test_verifier.c $(VERIFIER_TESTS_H)
$(VERIFIER_TESTS_H): $(VERIFIER_TEST_FILES) | $(VERIFIER_TESTS_DIR)
$(shell ( cd verifier/; \
echo '/* Generated header, do not edit */'; \
echo '#ifdef FILL_ARRAY'; \

View File

@ -315,8 +315,8 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s930x)
#define bpf_target_s930x
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
@ -341,8 +341,8 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s930x
#elif defined(__s390__)
#define bpf_target_s390
#elif defined(__arm__)
#define bpf_target_arm
#elif defined(__aarch64__)
@ -358,6 +358,7 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#if defined(bpf_target_x86)
#ifdef __KERNEL__
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
@ -368,19 +369,49 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
#else
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
#define PT_REGS_PARM2(x) ((x)->edx)
#define PT_REGS_PARM3(x) ((x)->ecx)
#define PT_REGS_PARM4(x) 0
#define PT_REGS_PARM5(x) 0
#define PT_REGS_RET(x) ((x)->esp)
#define PT_REGS_FP(x) ((x)->ebp)
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
#else
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
#define PT_REGS_PARM4(x) ((x)->rcx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->rsp)
#define PT_REGS_FP(x) ((x)->rbp)
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
#endif
#endif
#elif defined(bpf_target_s390x)
#elif defined(bpf_target_s390)
#define PT_REGS_PARM1(x) ((x)->gprs[2])
#define PT_REGS_PARM2(x) ((x)->gprs[3])
#define PT_REGS_PARM3(x) ((x)->gprs[4])
#define PT_REGS_PARM4(x) ((x)->gprs[5])
#define PT_REGS_PARM5(x) ((x)->gprs[6])
#define PT_REGS_RET(x) ((x)->gprs[14])
#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->gprs[2])
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_S390 const volatile user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
#elif defined(bpf_target_arm)
@ -397,16 +428,20 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x) ((x)->regs[0])
#define PT_REGS_PARM2(x) ((x)->regs[1])
#define PT_REGS_PARM3(x) ((x)->regs[2])
#define PT_REGS_PARM4(x) ((x)->regs[3])
#define PT_REGS_PARM5(x) ((x)->regs[4])
#define PT_REGS_RET(x) ((x)->regs[30])
#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[0])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->pc)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
#elif defined(bpf_target_mips)
@ -452,10 +487,10 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
#endif
#ifdef bpf_target_powerpc
#if defined(bpf_target_powerpc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif bpf_target_sparc
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else

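With the user_pt_regs casts above, the same PT_REGS_* accessors work whether a BPF program is handed struct pt_regs or the exported user_pt_regs layout. A minimal kprobe program using them; a sketch that assumes this tree's bpf_helpers.h (which defines SEC() and bpf_trace_printk):

#include <linux/ptrace.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_nanosleep")
int trace_nanosleep(struct pt_regs *ctx)
{
	/* first argument, fetched portably across architectures */
	long req = PT_REGS_PARM1(ctx);
	char fmt[] = "nanosleep req=%lx\n";

	bpf_trace_printk(fmt, sizeof(fmt), req);
	return 0;
}

char _license[] SEC("license") = "GPL";
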
View File

@ -21,12 +21,6 @@ ssize_t get_base_addr() {
return -EINVAL;
}
#ifdef __x86_64__
#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
#else
#define SYS_KPROBE_NAME "sys_nanosleep"
#endif
void test_attach_probe(void)
{
const char *kprobe_name = "kprobe/sys_nanosleep";
@ -84,7 +78,7 @@ void test_attach_probe(void)
kprobe_link = bpf_program__attach_kprobe(kprobe_prog,
false /* retprobe */,
SYS_KPROBE_NAME);
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
"err %ld\n", PTR_ERR(kprobe_link))) {
kprobe_link = NULL;
@ -92,7 +86,7 @@ void test_attach_probe(void)
}
kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog,
true /* retprobe */,
SYS_KPROBE_NAME);
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
"err %ld\n", PTR_ERR(kretprobe_link))) {
kretprobe_link = NULL;

View File

@ -5,12 +5,6 @@
#include <sys/socket.h>
#include <test_progs.h>
#ifdef __x86_64__
#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
#else
#define SYS_KPROBE_NAME "sys_nanosleep"
#endif
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
int cpu_data = *(int *)data, duration = 0;
@ -56,7 +50,7 @@ void test_perf_buffer(void)
/* attach kprobe */
link = bpf_program__attach_kprobe(prog, false /* retprobe */,
SYS_KPROBE_NAME);
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
goto out_close;

View File

@ -173,6 +173,18 @@ static int test_send_signal_tracepoint(void)
return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint");
}
static int test_send_signal_perf(void)
{
struct perf_event_attr attr = {
.sample_period = 1,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_sw_event");
}
static int test_send_signal_nmi(void)
{
struct perf_event_attr attr = {
@ -181,8 +193,26 @@ static int test_send_signal_nmi(void)
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
int pmu_fd;
return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, "perf_event");
/* Some setups (e.g. virtual machines) might run with hardware
* perf events disabled. If this is the case, skip this test.
*/
pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
-1 /* cpu */, -1 /* group_fd */, 0 /* flags */);
if (pmu_fd == -1) {
if (errno == ENOENT) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
__func__);
return 0;
}
/* Let the test fail with a more informative message */
} else {
close(pmu_fd);
}
return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_hw_event");
}
void test_send_signal(void)
@ -190,6 +220,7 @@ void test_send_signal(void)
int ret = 0;
ret |= test_send_signal_tracepoint();
ret |= test_send_signal_perf();
ret |= test_send_signal_nmi();
if (!ret)
printf("test_send_signal:OK\n");

View File

@ -18,7 +18,7 @@ int nested_loops(volatile struct pt_regs* ctx)
for (j = 0; j < 300; j++)
for (i = 0; i < j; i++) {
if (j & 1)
m = ctx->rax;
m = PT_REGS_RC(ctx);
else
m = j;
sum += i * m;

View File

@ -16,7 +16,7 @@ int while_true(volatile struct pt_regs* ctx)
int i = 0;
while (true) {
if (ctx->rax & 1)
if (PT_REGS_RC(ctx) & 1)
i += 3;
else
i += 7;

View File

@ -16,7 +16,7 @@ int while_true(volatile struct pt_regs* ctx)
__u64 i = 0, sum = 0;
do {
i++;
sum += ctx->rax;
sum += PT_REGS_RC(ctx);
} while (i < 0x100000000ULL);
return sum;
}

View File

@ -47,11 +47,12 @@ struct {
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP];
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__u64 (*value)[2 * MAX_STACK_RAWTP];
__type(value, raw_stack_trace_t);
} rawdata_map SEC(".maps");
SEC("tracepoint/raw_syscalls/sys_enter")

View File

@ -36,8 +36,7 @@ struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 128);
__type(key, __u32);
/* there seems to be a bug in kernel not handling typedef properly */
struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */

View File

@ -35,7 +35,7 @@ struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 16384);
__type(key, __u32);
__u64 (*value)[PERF_MAX_STACK_DEPTH];
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */

View File

@ -14,6 +14,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
static __u32 rol32(__u32 word, unsigned int shift)
{
@ -305,7 +306,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
ip6h->nexthdr = IPPROTO_IPV6;
ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
ip6h->payload_len =
__builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
bpf_htons(pkt_bytes + sizeof(struct ipv6hdr));
ip6h->hop_limit = 4;
ip6h->saddr.in6_u.u6_addr32[0] = 1;
@ -322,7 +323,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
struct real_definition *dst, __u32 pkt_bytes)
{
__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
__u32 ip_suffix = bpf_ntohs(pckt->flow.port16[0]);
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
__u16 *next_iph_u16;
@ -352,7 +353,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 1;
iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
iph->tot_len = bpf_htons(pkt_bytes + sizeof(struct iphdr));
/* don't update iph->daddr, since it will overwrite old eth_proto
* and multiple iterations of bpf_prog_run() will fail
*/
@ -639,7 +640,7 @@ static int process_l3_headers_v6(struct packet_description *pckt,
iph_len = sizeof(struct ipv6hdr);
*protocol = ip6h->nexthdr;
pckt->flow.proto = *protocol;
*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
*pkt_bytes = bpf_ntohs(ip6h->payload_len);
off += iph_len;
if (*protocol == 45) {
return XDP_DROP;
@ -671,7 +672,7 @@ static int process_l3_headers_v4(struct packet_description *pckt,
return XDP_DROP;
*protocol = iph->protocol;
pckt->flow.proto = *protocol;
*pkt_bytes = __builtin_bswap16(iph->tot_len);
*pkt_bytes = bpf_ntohs(iph->tot_len);
off += 20;
if (iph->frag_off & 65343)
return XDP_DROP;
@ -808,10 +809,10 @@ int balancer_ingress(struct xdp_md *ctx)
nh_off = sizeof(struct eth_hdr);
if (data + nh_off > data_end)
return XDP_DROP;
eth_proto = eth->eth_proto;
if (eth_proto == 8)
eth_proto = bpf_ntohs(eth->eth_proto);
if (eth_proto == ETH_P_IP)
return process_packet(data, nh_off, data_end, 0, ctx);
else if (eth_proto == 56710)
else if (eth_proto == ETH_P_IPV6)
return process_packet(data, nh_off, data_end, 1, ctx);
else
return XDP_DROP;

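The conversions above matter because __builtin_bswap16() swaps unconditionally, while the bpf_endian.h helpers reduce to a swap only on little-endian targets and to a no-op on big-endian ones such as s390, where the old code produced wrong byte order. The shape of the helpers, simplified with hypothetical names:

#include <stdint.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define my_htons(x) __builtin_bswap16(x)  /* LE: swap into network order */
#else
# define my_htons(x) ((uint16_t)(x))       /* BE: already network order */
#endif
#define my_ntohs(x) my_htons(x)            /* a 16-bit swap is its own inverse */
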
View File

@ -3417,6 +3417,94 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 4,
},
/*
* typedef int arr_t[16];
* struct s {
* arr_t *a;
* };
*/
{
.descr = "struct->ptr->typedef->array->int size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 5, 16), /* [4] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "ptr_mod_chain_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16,
.key_type_id = 5 /* int */,
.value_type_id = 3 /* arr_t */,
.max_entries = 4,
},
/*
* typedef int arr_t[16][8][4];
* struct s {
* arr_t *a;
* };
*/
{
.descr = "struct->ptr->typedef->multi-array->int size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 7, 16), /* [4] */
BTF_TYPE_ARRAY_ENC(6, 7, 8), /* [5] */
BTF_TYPE_ARRAY_ENC(7, 7, 4), /* [6] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "multi_arr_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16 * 8 * 4,
.key_type_id = 7 /* int */,
.value_type_id = 3 /* arr_t */,
.max_entries = 4,
},
/*
* typedef int int_t;
* typedef int_t arr3_t[4];
* typedef arr3_t arr2_t[8];
* typedef arr2_t arr1_t[16];
* struct s {
* arr1_t *a;
* };
*/
{
.descr = "typedef/multi-arr mix size resolution",
.raw_types = {
BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
BTF_MEMBER_ENC(NAME_TBD, 2, 0),
BTF_PTR_ENC(3), /* [2] */
BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
BTF_TYPE_ARRAY_ENC(5, 10, 16), /* [4] */
BTF_TYPEDEF_ENC(NAME_TBD, 6), /* [5] */
BTF_TYPE_ARRAY_ENC(7, 10, 8), /* [6] */
BTF_TYPEDEF_ENC(NAME_TBD, 8), /* [7] */
BTF_TYPE_ARRAY_ENC(9, 10, 4), /* [8] */
BTF_TYPEDEF_ENC(NAME_TBD, 10), /* [9] */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [10] */
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0a\0arr1_t\0arr2_t\0arr3_t\0int_t"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "typedef_arra_mix_size_resolve_map",
.key_size = sizeof(int),
.value_size = sizeof(int) * 16 * 8 * 4,
.key_type_id = 10 /* int */,
.value_type_id = 3 /* arr1_t */,
.max_entries = 4,
},
}; /* struct btf_raw_test raw_tests[] */

View File

@ -92,3 +92,11 @@ int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
int extract_build_id(char *build_id, size_t size);
void *spin_lock_thread(void *arg);
#ifdef __x86_64__
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
#elif defined(__s390x__)
#define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
#else
#define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
#endif

View File

@ -86,7 +86,7 @@ struct bpf_test {
int fixup_sk_storage_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed;
uint32_t insn_processed;
int prog_len;
enum {
UNDEF,
@ -95,16 +95,20 @@ struct bpf_test {
} result, result_unpriv;
enum bpf_prog_type prog_type;
uint8_t flags;
__u8 data[TEST_DATA_LEN];
void (*fill_helper)(struct bpf_test *self);
uint8_t runs;
struct {
uint32_t retval, retval_unpriv;
union {
__u8 data[TEST_DATA_LEN];
__u64 data64[TEST_DATA_LEN / 8];
};
} retvals[MAX_TEST_RUNS];
#define bpf_testdata_struct_t \
struct { \
uint32_t retval, retval_unpriv; \
union { \
__u8 data[TEST_DATA_LEN]; \
__u64 data64[TEST_DATA_LEN / 8]; \
}; \
}
union {
bpf_testdata_struct_t;
bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
};
enum bpf_attach_type expected_attach_type;
};
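
The bpf_testdata_struct_t macro lets struct bpf_test expose both the single-run fields (test->retval, test->data) and the per-run array (test->retvals[i]) as views of the same bytes: the anonymous member overlays retvals[0]. A smaller illustration of the trick, with hypothetical names:

#include <assert.h>

#define result_fields_t struct { int retval; char data[8]; }

struct testcase {
	union {
		result_fields_t;          /* anonymous: tc.retval, tc.data */
		result_fields_t runs[4];  /* same storage as runs[0] */
	};
};

int main(void)
{
	struct testcase tc = { .runs[0].retval = 42 };

	assert(tc.retval == 42);          /* aliases runs[0].retval */
	return 0;
}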
@ -949,17 +953,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
uint32_t expected_val;
int i;
if (!test->runs) {
expected_val = unpriv && test->retval_unpriv ?
test->retval_unpriv : test->retval;
err = do_prog_test_run(fd_prog, unpriv, expected_val,
test->data, sizeof(test->data));
if (err)
run_errs++;
else
run_successes++;
}
if (!test->runs)
test->runs = 1;
for (i = 0; i < test->runs; i++) {
if (unpriv && test->retvals[i].retval_unpriv)

View File

@ -226,7 +226,7 @@
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_ro = { 3 },

View File

@ -183,7 +183,7 @@
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 0x100000),

View File

@ -0,0 +1,73 @@
#define BPF_SOCK_ADDR_STORE(field, off, res, err) \
{ \
"wide store to bpf_sock_addr." #field "[" #off "]", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
offsetof(struct bpf_sock_addr, field[off])), \
BPF_EXIT_INSN(), \
}, \
.result = res, \
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
.errstr = err, \
}
/* user_ip6[0] is u64 aligned */
BPF_SOCK_ADDR_STORE(user_ip6, 0, ACCEPT,
NULL),
BPF_SOCK_ADDR_STORE(user_ip6, 1, REJECT,
"invalid bpf_context access off=12 size=8"),
BPF_SOCK_ADDR_STORE(user_ip6, 2, ACCEPT,
NULL),
BPF_SOCK_ADDR_STORE(user_ip6, 3, REJECT,
"invalid bpf_context access off=20 size=8"),
/* msg_src_ip6[0] is _not_ u64 aligned */
BPF_SOCK_ADDR_STORE(msg_src_ip6, 0, REJECT,
"invalid bpf_context access off=44 size=8"),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 1, ACCEPT,
NULL),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 2, REJECT,
"invalid bpf_context access off=52 size=8"),
BPF_SOCK_ADDR_STORE(msg_src_ip6, 3, REJECT,
"invalid bpf_context access off=56 size=8"),
#undef BPF_SOCK_ADDR_STORE
#define BPF_SOCK_ADDR_LOAD(field, off, res, err) \
{ \
"wide load from bpf_sock_addr." #field "[" #off "]", \
.insns = { \
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, \
offsetof(struct bpf_sock_addr, field[off])), \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_EXIT_INSN(), \
}, \
.result = res, \
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
.errstr = err, \
}
/* user_ip6[0] is u64 aligned */
BPF_SOCK_ADDR_LOAD(user_ip6, 0, ACCEPT,
NULL),
BPF_SOCK_ADDR_LOAD(user_ip6, 1, REJECT,
"invalid bpf_context access off=12 size=8"),
BPF_SOCK_ADDR_LOAD(user_ip6, 2, ACCEPT,
NULL),
BPF_SOCK_ADDR_LOAD(user_ip6, 3, REJECT,
"invalid bpf_context access off=20 size=8"),
/* msg_src_ip6[0] is _not_ u64 aligned */
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 0, REJECT,
"invalid bpf_context access off=44 size=8"),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 1, ACCEPT,
NULL),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 2, REJECT,
"invalid bpf_context access off=52 size=8"),
BPF_SOCK_ADDR_LOAD(msg_src_ip6, 3, REJECT,
"invalid bpf_context access off=56 size=8"),
#undef BPF_SOCK_ADDR_LOAD

Some files were not shown because too many files have changed in this diff.