Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2025-01-23 14:24:25 +08:00)
Merge tag 'net-6.1-rc3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from 802.15.4 (Zigbee et al).

  Current release - regressions:
   - ipa: fix bugs in the register conversion for IPA v3.1 and v3.5.1

  Current release - new code bugs:
   - mptcp: fix abba deadlock on fastopen
   - eth: stmmac: rk3588: allow multiple gmac controllers in one system

  Previous releases - regressions:
   - ip: rework the fix for dflt addr selection for connected nexthop
   - net: couple more fixes for misinterpreting bits in struct page
     after the signature was added

  Previous releases - always broken:
   - ipv6: ensure sane device mtu in tunnels
   - openvswitch: switch from WARN to pr_warn on a user-triggerable path
   - ethtool: eeprom: fix null-deref on genl_info in dump
   - ieee802154: more return code fixes for corner cases in dgram_sendmsg
   - mac802154: fix link-quality-indicator recording
   - eth: mlx5: fixes for IPsec, PTP timestamps, OvS and conntrack offload
   - eth: fec: limit register access on i.MX6UL
   - eth: bcm4908_enet: update TX stats after actual transmission
   - can: rcar_canfd: improve IRQ handling for RZ/G2L

  Misc:
   - genetlink: piggy back on the newly added resv_op_start to enforce
     more sanity checks on new commands"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-6.1-rc3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (57 commits)
  net: enetc: survive memory pressure without crashing
  kcm: do not sense pfmemalloc status in kcm_sendpage()
  net: do not sense pfmemalloc status in skb_append_pagefrags()
  net/mlx5e: Fix macsec sci endianness at rx sa update
  net/mlx5e: Fix wrong bitwise comparison usage in macsec_fs_rx_add_rule function
  net/mlx5e: Fix macsec rx security association (SA) update/delete
  net/mlx5e: Fix macsec coverity issue at rx sa update
  net/mlx5: Fix crash during sync firmware reset
  net/mlx5: Update fw fatal reporter state on PCI handlers successful recover
  net/mlx5e: TC, Fix cloned flow attr instance dests are not zeroed
  net/mlx5e: TC, Reject forwarding from internal port to internal port
  net/mlx5: Fix possible use-after-free in async command interface
  net/mlx5: ASO, Create the ASO SQ with the correct timestamp format
  net/mlx5e: Update restore chain id for slow path packets
  net/mlx5e: Extend SKB room check to include PTP-SQ
  net/mlx5: DR, Fix matcher disconnect error flow
  net/mlx5: Wait for firmware to enable CRS before pci_restore_state
  net/mlx5e: Do not increment ESN when updating IPsec ESN state
  netdevsim: remove dir in nsim_dev_debugfs_init() when creating ports dir failed
  netdevsim: fix memory leak in nsim_drv_probe() when nsim_dev_resources_register() failed
  ...
commit 2375886721
@@ -15436,6 +15436,7 @@ S:	Maintained
 W:	http://openvswitch.org
 F:	include/uapi/linux/openvswitch.h
 F:	net/openvswitch/
+F:	tools/testing/selftests/net/openvswitch/
 
 OPERATING PERFORMANCE POINTS (OPP)
 M:	Viresh Kumar <vireshk@kernel.org>
@@ -322,14 +322,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 					  &mscan_clksrc);
 	if (!priv->can.clock.freq) {
 		dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
-		goto exit_free_mscan;
+		goto exit_put_clock;
 	}
 
 	err = register_mscandev(dev, mscan_clksrc);
 	if (err) {
 		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
 			DRV_NAME, err);
-		goto exit_free_mscan;
+		goto exit_put_clock;
 	}
 
 	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -337,7 +337,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
 
 	return 0;
 
-exit_free_mscan:
+exit_put_clock:
+	if (data->put_clock)
+		data->put_clock(ofdev);
 	free_candev(dev);
 exit_dispose_irq:
 	irq_dispose_mapping(irq);
@ -1157,11 +1157,13 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
|
||||
{
|
||||
struct rcar_canfd_channel *priv = gpriv->ch[ch];
|
||||
u32 ridx = ch + RCANFD_RFFIFO_IDX;
|
||||
u32 sts;
|
||||
u32 sts, cc;
|
||||
|
||||
/* Handle Rx interrupts */
|
||||
sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
|
||||
if (likely(sts & RCANFD_RFSTS_RFIF)) {
|
||||
cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx));
|
||||
if (likely(sts & RCANFD_RFSTS_RFIF &&
|
||||
cc & RCANFD_RFCC_RFIE)) {
|
||||
if (napi_schedule_prep(&priv->napi)) {
|
||||
/* Disable Rx FIFO interrupts */
|
||||
rcar_canfd_clear_bit(priv->base,
|
||||
@ -1244,11 +1246,9 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
|
||||
|
||||
static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct rcar_canfd_global *gpriv = dev_id;
|
||||
u32 ch;
|
||||
struct rcar_canfd_channel *priv = dev_id;
|
||||
|
||||
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
|
||||
rcar_canfd_handle_channel_tx(gpriv, ch);
|
||||
rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -1276,11 +1276,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c
|
||||
|
||||
static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct rcar_canfd_global *gpriv = dev_id;
|
||||
u32 ch;
|
||||
struct rcar_canfd_channel *priv = dev_id;
|
||||
|
||||
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
|
||||
rcar_canfd_handle_channel_err(gpriv, ch);
|
||||
rcar_canfd_handle_channel_err(priv->gpriv, priv->channel);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -1721,6 +1719,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
priv->ndev = ndev;
|
||||
priv->base = gpriv->base;
|
||||
priv->channel = ch;
|
||||
priv->gpriv = gpriv;
|
||||
priv->can.clock.freq = fcan_freq;
|
||||
dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
|
||||
|
||||
@ -1749,7 +1748,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
}
|
||||
err = devm_request_irq(&pdev->dev, err_irq,
|
||||
rcar_canfd_channel_err_interrupt, 0,
|
||||
irq_name, gpriv);
|
||||
irq_name, priv);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
|
||||
err_irq, err);
|
||||
@ -1763,7 +1762,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
}
|
||||
err = devm_request_irq(&pdev->dev, tx_irq,
|
||||
rcar_canfd_channel_tx_interrupt, 0,
|
||||
irq_name, gpriv);
|
||||
irq_name, priv);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
|
||||
tx_irq, err);
|
||||
@ -1789,7 +1788,6 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
|
||||
|
||||
priv->can.do_set_mode = rcar_canfd_do_set_mode;
|
||||
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
|
||||
priv->gpriv = gpriv;
|
||||
SET_NETDEV_DEV(ndev, &pdev->dev);
|
||||
|
||||
netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
|
||||
|
@@ -1415,11 +1415,14 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
 	ret = mcp251x_gpio_setup(priv);
 	if (ret)
-		goto error_probe;
+		goto out_unregister_candev;
 
 	netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
 	return 0;
 
+out_unregister_candev:
+	unregister_candev(net);
+
 error_probe:
 	destroy_workqueue(priv->wq);
 	priv->wq = NULL;
@@ -1875,7 +1875,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->start_comp);
+	reinit_completion(&priv->start_comp);
 
 	err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
 					       priv->channel);
@@ -1893,7 +1893,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->stop_comp);
+	reinit_completion(&priv->stop_comp);
 
 	/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
 	 * see comment in kvaser_usb_hydra_update_state()
@@ -1320,7 +1320,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->start_comp);
+	reinit_completion(&priv->start_comp);
 
 	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
 					      priv->channel);
@@ -1338,7 +1338,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
 {
 	int err;
 
-	init_completion(&priv->stop_comp);
+	reinit_completion(&priv->stop_comp);
 
 	err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
 					      priv->channel);
@@ -561,8 +561,6 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (++ring->write_idx == ring->length - 1)
 		ring->write_idx = 0;
-	enet->netdev->stats.tx_bytes += skb->len;
-	enet->netdev->stats.tx_packets++;
 
 	return NETDEV_TX_OK;
 }
@@ -635,6 +633,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
 	struct bcm4908_enet_dma_ring_bd *buf_desc;
 	struct bcm4908_enet_dma_ring_slot *slot;
 	struct device *dev = enet->dev;
+	unsigned int bytes = 0;
 	int handled = 0;
 
 	while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -645,12 +644,17 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
 
 		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
 		dev_kfree_skb(slot->skb);
-		if (++tx_ring->read_idx == tx_ring->length)
-			tx_ring->read_idx = 0;
 
 		handled++;
+		bytes += slot->len;
+
+		if (++tx_ring->read_idx == tx_ring->length)
+			tx_ring->read_idx = 0;
 	}
 
+	enet->netdev->stats.tx_packets += handled;
+	enet->netdev->stats.tx_bytes += bytes;
+
 	if (handled < weight) {
 		napi_complete_done(napi, handled);
 		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
@@ -1991,6 +1991,9 @@ static int bcm_sysport_open(struct net_device *dev)
 		goto out_clk_disable;
 	}
 
+	/* Indicate that the MAC is responsible for PHY PM */
+	phydev->mac_managed_pm = true;
+
 	/* Reset house keeping link status */
 	priv->old_duplex = -1;
 	priv->old_link = -1;
@@ -2090,7 +2090,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
 	else
 		enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
 
+	/* Also prepare the consumer index in case page allocation never
+	 * succeeds. In that case, hardware will never advance producer index
+	 * to match consumer index, and will drop all frames.
+	 */
 	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+	enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
 
 	/* enable Rx ints by setting pkt thr to 1 */
 	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
@ -2432,6 +2432,31 @@ static u32 fec_enet_register_offset[] = {
|
||||
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
|
||||
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
|
||||
};
|
||||
/* for i.MX6ul */
|
||||
static u32 fec_enet_register_offset_6ul[] = {
|
||||
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
|
||||
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
|
||||
FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
|
||||
FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
|
||||
FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
|
||||
FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
|
||||
FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
|
||||
RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
|
||||
RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
|
||||
RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
|
||||
RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
|
||||
RMON_T_P_GTE2048, RMON_T_OCTETS,
|
||||
IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
|
||||
IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
|
||||
IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
|
||||
RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
|
||||
RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
|
||||
RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
|
||||
RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
|
||||
RMON_R_P_GTE2048, RMON_R_OCTETS,
|
||||
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
|
||||
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
|
||||
};
|
||||
#else
|
||||
static __u32 fec_enet_register_version = 1;
|
||||
static u32 fec_enet_register_offset[] = {
|
||||
@ -2456,7 +2481,24 @@ static void fec_enet_get_regs(struct net_device *ndev,
|
||||
u32 *buf = (u32 *)regbuf;
|
||||
u32 i, off;
|
||||
int ret;
|
||||
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
|
||||
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
|
||||
defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
|
||||
u32 *reg_list;
|
||||
u32 reg_cnt;
|
||||
|
||||
if (!of_machine_is_compatible("fsl,imx6ul")) {
|
||||
reg_list = fec_enet_register_offset;
|
||||
reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
|
||||
} else {
|
||||
reg_list = fec_enet_register_offset_6ul;
|
||||
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
|
||||
}
|
||||
#else
|
||||
/* coldfire */
|
||||
static u32 *reg_list = fec_enet_register_offset;
|
||||
static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
|
||||
#endif
|
||||
ret = pm_runtime_resume_and_get(dev);
|
||||
if (ret < 0)
|
||||
return;
|
||||
@ -2465,8 +2507,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
|
||||
|
||||
memset(buf, 0, regs->len);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
|
||||
off = fec_enet_register_offset[i];
|
||||
for (i = 0; i < reg_cnt; i++) {
|
||||
off = reg_list[i];
|
||||
|
||||
if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
|
||||
!(fep->quirks & FEC_QUIRK_HAS_FRREG))
|
||||
|
@@ -2900,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
 	ret = of_device_register(&port->ofdev);
 	if (ret) {
 		pr_err("failed to register device. ret=%d\n", ret);
+		put_device(&port->ofdev.dev);
 		goto out;
 	}
 
@ -3185,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
|
||||
|
||||
if (cmd->flow_type == TCP_V4_FLOW ||
|
||||
cmd->flow_type == UDP_V4_FLOW) {
|
||||
if (i_set & I40E_L3_SRC_MASK)
|
||||
cmd->data |= RXH_IP_SRC;
|
||||
if (i_set & I40E_L3_DST_MASK)
|
||||
cmd->data |= RXH_IP_DST;
|
||||
if (hw->mac.type == I40E_MAC_X722) {
|
||||
if (i_set & I40E_X722_L3_SRC_MASK)
|
||||
cmd->data |= RXH_IP_SRC;
|
||||
if (i_set & I40E_X722_L3_DST_MASK)
|
||||
cmd->data |= RXH_IP_DST;
|
||||
} else {
|
||||
if (i_set & I40E_L3_SRC_MASK)
|
||||
cmd->data |= RXH_IP_SRC;
|
||||
if (i_set & I40E_L3_DST_MASK)
|
||||
cmd->data |= RXH_IP_DST;
|
||||
}
|
||||
} else if (cmd->flow_type == TCP_V6_FLOW ||
|
||||
cmd->flow_type == UDP_V6_FLOW) {
|
||||
if (i_set & I40E_L3_V6_SRC_MASK)
|
||||
@ -3546,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
|
||||
|
||||
/**
|
||||
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
|
||||
* @hw: hw structure
|
||||
* @nfc: pointer to user request
|
||||
* @i_setc: bits currently set
|
||||
*
|
||||
* Returns value of bits to be set per user request
|
||||
**/
|
||||
static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
|
||||
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
|
||||
struct ethtool_rxnfc *nfc,
|
||||
u64 i_setc)
|
||||
{
|
||||
u64 i_set = i_setc;
|
||||
u64 src_l3 = 0, dst_l3 = 0;
|
||||
@ -3570,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
|
||||
dst_l3 = I40E_L3_V6_DST_MASK;
|
||||
} else if (nfc->flow_type == TCP_V4_FLOW ||
|
||||
nfc->flow_type == UDP_V4_FLOW) {
|
||||
src_l3 = I40E_L3_SRC_MASK;
|
||||
dst_l3 = I40E_L3_DST_MASK;
|
||||
if (hw->mac.type == I40E_MAC_X722) {
|
||||
src_l3 = I40E_X722_L3_SRC_MASK;
|
||||
dst_l3 = I40E_X722_L3_DST_MASK;
|
||||
} else {
|
||||
src_l3 = I40E_L3_SRC_MASK;
|
||||
dst_l3 = I40E_L3_DST_MASK;
|
||||
}
|
||||
} else {
|
||||
/* Any other flow type are not supported here */
|
||||
return i_set;
|
||||
@ -3589,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
|
||||
return i_set;
|
||||
}
|
||||
|
||||
#define FLOW_PCTYPES_SIZE 64
|
||||
/**
|
||||
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
|
||||
* @pf: pointer to the physical function struct
|
||||
@ -3601,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
||||
struct i40e_hw *hw = &pf->hw;
|
||||
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
|
||||
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
|
||||
u8 flow_pctype = 0;
|
||||
DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
|
||||
u64 i_set, i_setc;
|
||||
|
||||
bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
|
||||
|
||||
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
|
||||
@ -3619,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
||||
|
||||
switch (nfc->flow_type) {
|
||||
case TCP_V4_FLOW:
|
||||
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
|
||||
hena |=
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
|
||||
flow_pctypes);
|
||||
break;
|
||||
case TCP_V6_FLOW:
|
||||
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
|
||||
hena |=
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
|
||||
hena |=
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
|
||||
flow_pctypes);
|
||||
break;
|
||||
case UDP_V4_FLOW:
|
||||
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
|
||||
hena |=
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
|
||||
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
|
||||
flow_pctypes);
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
|
||||
flow_pctypes);
|
||||
}
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
|
||||
break;
|
||||
case UDP_V6_FLOW:
|
||||
flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
|
||||
hena |=
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
|
||||
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
|
||||
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
|
||||
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
|
||||
flow_pctypes);
|
||||
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
|
||||
flow_pctypes);
|
||||
}
|
||||
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
|
||||
break;
|
||||
case AH_ESP_V4_FLOW:
|
||||
@ -3681,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (flow_pctype) {
|
||||
i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
|
||||
flow_pctype)) |
|
||||
((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
|
||||
flow_pctype)) << 32);
|
||||
i_set = i40e_get_rss_hash_bits(nfc, i_setc);
|
||||
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
|
||||
(u32)i_set);
|
||||
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
|
||||
(u32)(i_set >> 32));
|
||||
hena |= BIT_ULL(flow_pctype);
|
||||
if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
|
||||
u8 flow_id;
|
||||
|
||||
for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
|
||||
i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
|
||||
((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
|
||||
i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
|
||||
|
||||
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
|
||||
(u32)i_set);
|
||||
i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
|
||||
(u32)(i_set >> 32));
|
||||
hena |= BIT_ULL(flow_id);
|
||||
}
|
||||
}
|
||||
|
||||
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
|
||||
|
@@ -1404,6 +1404,10 @@ struct i40e_lldp_variables {
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
 
 /* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT		49
+#define I40E_X722_L3_SRC_MASK		(0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT		41
+#define I40E_X722_L3_DST_MASK		(0x3ULL << I40E_X722_L3_DST_SHIFT)
 #define I40E_L3_SRC_SHIFT		47
 #define I40E_L3_SRC_MASK		(0x3ULL << I40E_L3_SRC_SHIFT)
 #define I40E_L3_V6_SRC_SHIFT		43
@ -1536,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
|
||||
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
|
||||
return true;
|
||||
|
||||
/* If the VFs have been disabled, this means something else is
|
||||
* resetting the VF, so we shouldn't continue.
|
||||
*/
|
||||
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
|
||||
/* Bail out if VFs are disabled. */
|
||||
if (test_bit(__I40E_VF_DISABLE, pf->state))
|
||||
return true;
|
||||
|
||||
/* If VF is being reset already we don't need to continue. */
|
||||
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
|
||||
return true;
|
||||
|
||||
i40e_trigger_vf_reset(vf, flr);
|
||||
@ -1576,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
|
||||
i40e_cleanup_reset_vf(vf);
|
||||
|
||||
i40e_flush(hw);
|
||||
clear_bit(__I40E_VF_DISABLE, pf->state);
|
||||
clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1609,8 +1611,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
|
||||
return false;
|
||||
|
||||
/* Begin reset on all VFs at once */
|
||||
for (v = 0; v < pf->num_alloc_vfs; v++)
|
||||
i40e_trigger_vf_reset(&pf->vf[v], flr);
|
||||
for (v = 0; v < pf->num_alloc_vfs; v++) {
|
||||
vf = &pf->vf[v];
|
||||
/* If VF is being reset no need to trigger reset again */
|
||||
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
|
||||
i40e_trigger_vf_reset(&pf->vf[v], flr);
|
||||
}
|
||||
|
||||
/* HW requires some time to make sure it can flush the FIFO for a VF
|
||||
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
|
||||
@ -1626,9 +1632,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
|
||||
*/
|
||||
while (v < pf->num_alloc_vfs) {
|
||||
vf = &pf->vf[v];
|
||||
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
|
||||
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
|
||||
break;
|
||||
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
|
||||
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
|
||||
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
|
||||
break;
|
||||
}
|
||||
|
||||
/* If the current VF has finished resetting, move on
|
||||
* to the next VF in sequence.
|
||||
@ -1656,6 +1664,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
|
||||
if (pf->vf[v].lan_vsi_idx == 0)
|
||||
continue;
|
||||
|
||||
/* If VF is reset in another thread just continue */
|
||||
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
|
||||
continue;
|
||||
|
||||
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
|
||||
}
|
||||
|
||||
@ -1667,6 +1679,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
|
||||
if (pf->vf[v].lan_vsi_idx == 0)
|
||||
continue;
|
||||
|
||||
/* If VF is reset in another thread just continue */
|
||||
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
|
||||
continue;
|
||||
|
||||
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
|
||||
}
|
||||
|
||||
@ -1676,8 +1692,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
|
||||
mdelay(50);
|
||||
|
||||
/* Finish the reset on each VF */
|
||||
for (v = 0; v < pf->num_alloc_vfs; v++)
|
||||
for (v = 0; v < pf->num_alloc_vfs; v++) {
|
||||
/* If VF is reset in another thread just continue */
|
||||
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
|
||||
continue;
|
||||
|
||||
i40e_cleanup_reset_vf(&pf->vf[v]);
|
||||
}
|
||||
|
||||
i40e_flush(hw);
|
||||
clear_bit(__I40E_VF_DISABLE, pf->state);
|
||||
|
@@ -39,6 +39,7 @@ enum i40e_vf_states {
 	I40E_VF_STATE_MC_PROMISC,
 	I40E_VF_STATE_UC_PROMISC,
 	I40E_VF_STATE_PRE_ENABLE,
+	I40E_VF_STATE_RESETTING
 };
 
 /* VF capabilities */
@@ -2004,7 +2004,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
 	ctx->dev = dev;
 	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
 	atomic_set(&ctx->num_inflight, 1);
-	init_waitqueue_head(&ctx->wait);
+	init_completion(&ctx->inflight_done);
 }
 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
 
@@ -2018,8 +2018,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
  */
 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
 {
-	atomic_dec(&ctx->num_inflight);
-	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+	if (!atomic_dec_and_test(&ctx->num_inflight))
+		wait_for_completion(&ctx->inflight_done);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
 
@@ -2032,7 +2032,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
 	status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
 	work->user_callback(status, work);
 	if (atomic_dec_and_test(&ctx->num_inflight))
-		wake_up(&ctx->wait);
+		complete(&ctx->inflight_done);
 }
 
 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -2050,7 +2050,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
 	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
 		       mlx5_cmd_exec_cb_handler, work, false);
 	if (ret && atomic_dec_and_test(&ctx->num_inflight))
-		wake_up(&ctx->wait);
+		complete(&ctx->inflight_done);
 
 	return ret;
 }
@@ -6,6 +6,7 @@
 
 #include "en.h"
 #include "en_stats.h"
+#include "en/txrx.h"
 #include <linux/ptp_classify.h>
 
 #define MLX5E_PTP_CHANNEL_IX 0
@@ -68,6 +69,14 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
 		fk.ports.dst == htons(PTP_EV_PORT));
 }
 
+static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
+{
+	if (!sq->ptpsq)
+		return true;
+
+	return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
+}
+
 int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 		   u8 lag_port, struct mlx5e_ptp **cp);
 void mlx5e_ptp_close(struct mlx5e_ptp *c);
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow {
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_tc_flow *peer_flow;
 	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	struct list_head peer;    /* flows with peer flow */
@@ -111,6 +112,7 @@ struct mlx5e_tc_flow {
 	struct completion del_hw_done;
 	struct mlx5_flow_attr *attr;
 	struct list_head attrs;
+	u32 chain_mapping;
 };
 
 struct mlx5_flow_handle *
@@ -57,6 +57,12 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
 
+static inline bool
+mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
+{
+	return (*fifo->pc - *fifo->cc) < fifo->mask;
+}
+
 static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 {
@@ -101,7 +101,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct xfrm_replay_state_esn *replay_esn;
 	u32 seq_bottom = 0;
 	u8 overlap;
-	u32 *esn;
 
 	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
 		sa_entry->esn_state.trigger = 0;
@@ -116,11 +115,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
 						    htonl(seq_bottom));
-	esn = &sa_entry->esn_state.esn;
 
 	sa_entry->esn_state.trigger = 1;
 	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
-		++(*esn);
 		sa_entry->esn_state.overlap = 0;
 		return true;
 	} else if (unlikely(!overlap &&
@ -432,7 +432,7 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
|
||||
bool active)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = macsec->mdev;
|
||||
struct mlx5_macsec_obj_attrs attrs;
|
||||
struct mlx5_macsec_obj_attrs attrs = {};
|
||||
int err = 0;
|
||||
|
||||
if (rx_sa->active != active)
|
||||
@ -444,7 +444,7 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
|
||||
return 0;
|
||||
}
|
||||
|
||||
attrs.sci = rx_sa->sci;
|
||||
attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
|
||||
attrs.enc_key_id = rx_sa->enc_key_id;
|
||||
err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
|
||||
if (err)
|
||||
@ -999,11 +999,11 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
|
||||
}
|
||||
|
||||
rx_sa = rx_sc->rx_sa[assoc_num];
|
||||
if (rx_sa) {
|
||||
if (!rx_sa) {
|
||||
netdev_err(ctx->netdev,
|
||||
"MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
|
||||
"MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
|
||||
sci, assoc_num);
|
||||
err = -EEXIST;
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1055,11 +1055,11 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
|
||||
}
|
||||
|
||||
rx_sa = rx_sc->rx_sa[assoc_num];
|
||||
if (rx_sa) {
|
||||
if (!rx_sa) {
|
||||
netdev_err(ctx->netdev,
|
||||
"MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
|
||||
"MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
|
||||
sci, assoc_num);
|
||||
err = -EEXIST;
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@@ -1180,7 +1180,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
 	rx_rule->rule[0] = rule;
 
 	/* Rx crypto table without SCI rule */
-	if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) {
+	if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
 		memset(spec, 0, sizeof(struct mlx5_flow_spec));
 		memset(&dest, 0, sizeof(struct mlx5_flow_destination));
 		memset(&flow_act, 0, sizeof(flow_act));
@ -1405,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
|
||||
struct mlx5e_tc_flow *flow,
|
||||
struct mlx5_flow_spec *spec)
|
||||
{
|
||||
struct mlx5e_tc_mod_hdr_acts mod_acts = {};
|
||||
struct mlx5e_mod_hdr_handle *mh = NULL;
|
||||
struct mlx5_flow_attr *slow_attr;
|
||||
struct mlx5_flow_handle *rule;
|
||||
bool fwd_and_modify_cap;
|
||||
u32 chain_mapping = 0;
|
||||
int err;
|
||||
|
||||
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
|
||||
if (!slow_attr)
|
||||
@ -1417,13 +1422,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
|
||||
slow_attr->esw_attr->split_count = 0;
|
||||
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
|
||||
|
||||
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
|
||||
if (!IS_ERR(rule))
|
||||
flow_flag_set(flow, SLOW);
|
||||
fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
|
||||
if (!fwd_and_modify_cap)
|
||||
goto skip_restore;
|
||||
|
||||
err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
|
||||
if (err)
|
||||
goto err_get_chain;
|
||||
|
||||
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
|
||||
CHAIN_TO_REG, chain_mapping);
|
||||
if (err)
|
||||
goto err_reg_set;
|
||||
|
||||
mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
|
||||
MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
|
||||
if (IS_ERR(mh)) {
|
||||
err = PTR_ERR(mh);
|
||||
goto err_attach;
|
||||
}
|
||||
|
||||
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
|
||||
slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
|
||||
|
||||
skip_restore:
|
||||
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
|
||||
if (IS_ERR(rule)) {
|
||||
err = PTR_ERR(rule);
|
||||
goto err_offload;
|
||||
}
|
||||
|
||||
flow->slow_mh = mh;
|
||||
flow->chain_mapping = chain_mapping;
|
||||
flow_flag_set(flow, SLOW);
|
||||
|
||||
mlx5e_mod_hdr_dealloc(&mod_acts);
|
||||
kfree(slow_attr);
|
||||
|
||||
return rule;
|
||||
|
||||
err_offload:
|
||||
if (fwd_and_modify_cap)
|
||||
mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
|
||||
err_attach:
|
||||
err_reg_set:
|
||||
if (fwd_and_modify_cap)
|
||||
mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
|
||||
err_get_chain:
|
||||
mlx5e_mod_hdr_dealloc(&mod_acts);
|
||||
kfree(slow_attr);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
|
||||
@ -1441,7 +1489,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
|
||||
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
|
||||
slow_attr->esw_attr->split_count = 0;
|
||||
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
|
||||
if (flow->slow_mh) {
|
||||
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
|
||||
slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
|
||||
}
|
||||
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
|
||||
if (flow->slow_mh) {
|
||||
mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
|
||||
mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
|
||||
flow->chain_mapping = 0;
|
||||
flow->slow_mh = NULL;
|
||||
}
|
||||
flow_flag_clear(flow, SLOW);
|
||||
kfree(slow_attr);
|
||||
}
|
||||
@ -3575,6 +3633,10 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
|
||||
attr2->action = 0;
|
||||
attr2->flags = 0;
|
||||
attr2->parse_attr = parse_attr;
|
||||
attr2->esw_attr->out_count = 0;
|
||||
attr2->esw_attr->split_count = 0;
|
||||
attr2->dest_chain = 0;
|
||||
attr2->dest_ft = NULL;
|
||||
return attr2;
|
||||
}
|
||||
|
||||
@ -4008,6 +4070,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
|
||||
struct mlx5e_tc_flow_parse_attr *parse_attr;
|
||||
struct mlx5_flow_attr *attr = flow->attr;
|
||||
struct mlx5_esw_flow_attr *esw_attr;
|
||||
struct net_device *filter_dev;
|
||||
int err;
|
||||
|
||||
err = flow_action_supported(flow_action, extack);
|
||||
@ -4016,6 +4079,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
|
||||
|
||||
esw_attr = attr->esw_attr;
|
||||
parse_attr = attr->parse_attr;
|
||||
filter_dev = parse_attr->filter_dev;
|
||||
parse_state = &parse_attr->parse_state;
|
||||
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
|
||||
parse_state->ct_priv = get_ct_priv(priv);
|
||||
@ -4025,13 +4089,21 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
|
||||
return err;
|
||||
|
||||
/* Forward to/from internal port can only have 1 dest */
|
||||
if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
|
||||
if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
|
||||
esw_attr->out_count > 1) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Rules with internal port can have only one destination");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/* Forward from tunnel/internal port to internal port is not supported */
|
||||
if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
|
||||
esw_attr->dest_int_port) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Forwarding from tunnel/internal port to internal port is not supported");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
|
||||
if (err)
|
||||
return err;
|
||||
|
@@ -392,6 +392,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	if (unlikely(sq->ptpsq)) {
 		mlx5e_skb_cb_hwtstamp_init(skb);
 		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+		if (!netif_tx_queue_stopped(sq->txq) &&
+		    !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
+			netif_tx_stop_queue(sq->txq);
+			sq->stats->stopped++;
+		}
 		skb_get(skb);
 	}
 
@@ -868,6 +873,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	if (netif_tx_queue_stopped(sq->txq) &&
 	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+	    mlx5e_ptpsq_fifo_has_room(sq) &&
 	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
 		netif_tx_wake_queue(sq->txq);
 		stats->wake++;
@@ -358,6 +358,23 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
 		err = -ETIMEDOUT;
 	}
 
+	do {
+		err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &reg16);
+		if (err)
+			return err;
+		if (reg16 == dev_id)
+			break;
+		msleep(20);
+	} while (!time_after(jiffies, timeout));
+
+	if (reg16 == dev_id) {
+		mlx5_core_info(dev, "Firmware responds to PCI config cycles again\n");
+	} else {
+		mlx5_core_err(dev, "Firmware is not responsive (0x%04x) after %llu ms\n",
+			      reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
+		err = -ETIMEDOUT;
+	}
+
 restore:
 	list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
 		pci_cfg_access_unlock(sdev);
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/transobj.h>
+#include "clock.h"
 #include "aso.h"
 #include "wq.h"
 
@@ -179,6 +180,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
 {
 	void *in, *sqc, *wq;
 	int inlen, err;
+	u8 ts_format;
 
 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
 		sizeof(u64) * sq->wq_ctrl.buf.npages;
@@ -195,6 +197,11 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
 
+	ts_format = mlx5_is_real_time_sq(mdev) ?
+			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+	MLX5_SET(sqc, sqc, ts_format, ts_format);
+
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
 	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
@@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
+	if (!mpfs)
 		return;
 
 	WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
 	int err = 0;
 	u32 index;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
+	if (!mpfs)
 		return 0;
 
 	mutex_lock(&mpfs->lock);
@@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
 	int err = 0;
 	u32 index;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
+	if (!mpfs)
 		return 0;
 
 	mutex_lock(&mpfs->lock);
@@ -1872,6 +1872,10 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
 
 	err = mlx5_load_one(dev, false);
 
+	if (!err)
+		devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
+						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+
 	mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
 		       !err ? "recovered" : "Failed");
 }
@@ -1200,7 +1200,8 @@ free_rule:
 	}
 
 remove_from_nic_tbl:
-	mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+	if (!nic_matcher->rules)
+		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
 
 free_hw_ste:
 	mlx5dr_domain_nic_unlock(nic_dmn);
@@ -6851,7 +6851,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
 	char banner[sizeof(version)];
 	struct ksz_switch *sw = NULL;
 
-	result = pci_enable_device(pdev);
+	result = pcim_enable_device(pdev);
 	if (result)
 		return result;
 
@ -309,6 +309,7 @@ static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
|
||||
lan966x, FDMA_CH_DB_DISCARD);
|
||||
|
||||
tx->activated = false;
|
||||
tx->last_in_use = -1;
|
||||
}
|
||||
|
||||
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
|
||||
@ -687,17 +688,14 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
|
||||
|
||||
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
|
||||
{
|
||||
void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
|
||||
dma_addr_t rx_dma, tx_dma;
|
||||
dma_addr_t rx_dma;
|
||||
void *rx_dcbs;
|
||||
u32 size;
|
||||
int err;
|
||||
|
||||
/* Store these for later to free them */
|
||||
rx_dma = lan966x->rx.dma;
|
||||
tx_dma = lan966x->tx.dma;
|
||||
rx_dcbs = lan966x->rx.dcbs;
|
||||
tx_dcbs = lan966x->tx.dcbs;
|
||||
tx_dcbs_buf = lan966x->tx.dcbs_buf;
|
||||
|
||||
napi_synchronize(&lan966x->napi);
|
||||
napi_disable(&lan966x->napi);
|
||||
@ -715,17 +713,6 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
|
||||
size = ALIGN(size, PAGE_SIZE);
|
||||
dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
|
||||
|
||||
lan966x_fdma_tx_disable(&lan966x->tx);
|
||||
err = lan966x_fdma_tx_alloc(&lan966x->tx);
|
||||
if (err)
|
||||
goto restore_tx;
|
||||
|
||||
size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
|
||||
size = ALIGN(size, PAGE_SIZE);
|
||||
dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
|
||||
|
||||
kfree(tx_dcbs_buf);
|
||||
|
||||
lan966x_fdma_wakeup_netdev(lan966x);
|
||||
napi_enable(&lan966x->napi);
|
||||
|
||||
@ -735,11 +722,6 @@ restore:
|
||||
lan966x->rx.dcbs = rx_dcbs;
|
||||
lan966x_fdma_rx_start(&lan966x->rx);
|
||||
|
||||
restore_tx:
|
||||
lan966x->tx.dma = tx_dma;
|
||||
lan966x->tx.dcbs = tx_dcbs;
|
||||
lan966x->tx.dcbs_buf = tx_dcbs_buf;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -1229,6 +1229,8 @@ static int ave_init(struct net_device *ndev)
 
 	phy_support_asym_pause(phydev);
 
+	phydev->mac_managed_pm = true;
+
 	phy_attached_info(phydev);
 
 	return 0;
@@ -1756,6 +1758,10 @@ static int ave_resume(struct device *dev)
 
 	ave_global_reset(ndev);
 
+	ret = phy_init_hw(ndev->phydev);
+	if (ret)
+		return ret;
+
 	ave_ethtool_get_wol(ndev, &wol);
 	wol.wolopts = priv->wolopts;
 	__ave_ethtool_set_wol(ndev, &wol);
@@ -1243,6 +1243,12 @@ static const struct rk_gmac_ops rk3588_ops = {
 	.set_rgmii_speed = rk3588_set_gmac_speed,
 	.set_rmii_speed = rk3588_set_gmac_speed,
 	.set_clock_selection = rk3588_set_clock_selection,
+	.regs_valid = true,
+	.regs = {
+		0xfe1b0000, /* gmac0 */
+		0xfe1c0000, /* gmac1 */
+		0x0, /* sentinel */
+	},
 };
 
 #define RV1108_GRF_GMAC_CON0		0X0900
@@ -179,10 +179,10 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 static const struct ipa_resource ipa_resource_src[] = {
 	[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
 		.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
-			.min = 1, .max = 255,
+			.min = 1, .max = 63,
 		},
 		.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
-			.min = 1, .max = 255,
+			.min = 1, .max = 63,
 		},
 		.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
 			.min = 1, .max = 63,
@@ -434,6 +434,9 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
 	const struct ipa_reg *reg;
 	u32 val;
 
+	if (ipa->version < IPA_VERSION_3_5_1)
+		return;
+
 	reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
 	val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
 			     enter_idle_debounce_thresh);
@ -127,112 +127,80 @@ static const u32 ipa_reg_counter_cfg_fmask[] = {
|
||||
IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
|
||||
|
||||
static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
|
||||
0x00000400, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
|
||||
0x00000404, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
|
||||
0x00000408, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
|
||||
0x0000040c, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
|
||||
0x00000500, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
|
||||
0x00000504, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
|
||||
0x00000508, 0x0020);
|
||||
|
||||
static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
|
||||
[X_MIN_LIM] = GENMASK(5, 0),
|
||||
/* Bits 6-7 reserved */
|
||||
[X_MAX_LIM] = GENMASK(13, 8),
|
||||
/* Bits 14-15 reserved */
|
||||
[Y_MIN_LIM] = GENMASK(21, 16),
|
||||
/* Bits 22-23 reserved */
|
||||
[Y_MAX_LIM] = GENMASK(29, 24),
|
||||
/* Bits 30-31 reserved */
|
||||
[X_MIN_LIM] = GENMASK(7, 0),
|
||||
[X_MAX_LIM] = GENMASK(15, 8),
|
||||
[Y_MIN_LIM] = GENMASK(23, 16),
|
||||
[Y_MAX_LIM] = GENMASK(31, 24),
|
||||
};
|
||||
|
||||
IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
|
||||
|
@ -117,6 +117,10 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
|
||||
|
||||
static void nsim_bus_dev_release(struct device *dev)
|
||||
{
|
||||
struct nsim_bus_dev *nsim_bus_dev;
|
||||
|
||||
nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev);
|
||||
kfree(nsim_bus_dev);
|
||||
}
|
||||
|
||||
static struct device_type nsim_bus_dev_type = {
|
||||
@ -291,6 +295,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
|
||||
|
||||
err_nsim_bus_dev_id_free:
|
||||
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
|
||||
put_device(&nsim_bus_dev->dev);
|
||||
nsim_bus_dev = NULL;
|
||||
err_nsim_bus_dev_free:
|
||||
kfree(nsim_bus_dev);
|
||||
return ERR_PTR(err);
|
||||
@ -300,9 +306,8 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
|
||||
{
|
||||
/* Disallow using nsim_bus_dev */
|
||||
smp_store_release(&nsim_bus_dev->init, false);
|
||||
device_unregister(&nsim_bus_dev->dev);
|
||||
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
|
||||
kfree(nsim_bus_dev);
|
||||
device_unregister(&nsim_bus_dev->dev);
|
||||
}
|
||||
|
||||
static struct device_driver nsim_driver = {
|
||||
|
@ -309,8 +309,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
|
||||
if (IS_ERR(nsim_dev->ddir))
|
||||
return PTR_ERR(nsim_dev->ddir);
|
||||
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
|
||||
if (IS_ERR(nsim_dev->ports_ddir))
|
||||
return PTR_ERR(nsim_dev->ports_ddir);
|
||||
if (IS_ERR(nsim_dev->ports_ddir)) {
|
||||
err = PTR_ERR(nsim_dev->ports_ddir);
|
||||
goto err_ddir;
|
||||
}
|
||||
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
|
||||
&nsim_dev->fw_update_status);
|
||||
debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
|
||||
@ -346,7 +348,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
|
||||
nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
|
||||
if (IS_ERR(nsim_dev->nodes_ddir)) {
|
||||
err = PTR_ERR(nsim_dev->nodes_ddir);
|
||||
goto err_out;
|
||||
goto err_ports_ddir;
|
||||
}
|
||||
debugfs_create_bool("fail_trap_drop_counter_get", 0600,
|
||||
nsim_dev->ddir,
|
||||
@ -354,8 +356,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
|
||||
nsim_udp_tunnels_debugfs_create(nsim_dev);
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
err_ports_ddir:
|
||||
debugfs_remove_recursive(nsim_dev->ports_ddir);
|
||||
err_ddir:
|
||||
debugfs_remove_recursive(nsim_dev->ddir);
|
||||
return err;
|
||||
}
|
||||
@@ -442,7 +445,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     &params);
	if (err) {
		pr_err("Failed to register IPv4 top resource\n");
		goto out;
		goto err_out;
	}

	err = devl_resource_register(devlink, "fib", (u64)-1,
@@ -450,7 +453,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     NSIM_RESOURCE_IPV4, &params);
	if (err) {
		pr_err("Failed to register IPv4 FIB resource\n");
		return err;
		goto err_out;
	}

	err = devl_resource_register(devlink, "fib-rules", (u64)-1,
@@ -458,7 +461,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     NSIM_RESOURCE_IPV4, &params);
	if (err) {
		pr_err("Failed to register IPv4 FIB rules resource\n");
		return err;
		goto err_out;
	}

	/* Resources for IPv6 */
@@ -468,7 +471,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     &params);
	if (err) {
		pr_err("Failed to register IPv6 top resource\n");
		goto out;
		goto err_out;
	}

	err = devl_resource_register(devlink, "fib", (u64)-1,
@@ -476,7 +479,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     NSIM_RESOURCE_IPV6, &params);
	if (err) {
		pr_err("Failed to register IPv6 FIB resource\n");
		return err;
		goto err_out;
	}

	err = devl_resource_register(devlink, "fib-rules", (u64)-1,
@@ -484,7 +487,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     NSIM_RESOURCE_IPV6, &params);
	if (err) {
		pr_err("Failed to register IPv6 FIB rules resource\n");
		return err;
		goto err_out;
	}

	/* Resources for nexthops */
@@ -492,8 +495,14 @@ static int nsim_dev_resources_register(struct devlink *devlink)
				     NSIM_RESOURCE_NEXTHOPS,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &params);
	if (err) {
		pr_err("Failed to register NEXTHOPS resource\n");
		goto err_out;
	}
	return 0;

out:
err_out:
	devl_resources_unregister(devlink);
	return err;
}

@@ -970,7 +970,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
	struct completion inflight_done;
};

struct mlx5_async_work;
@@ -41,13 +41,21 @@ struct genl_info;
 * @mcgrps: multicast groups used by this family
 * @n_mcgrps: number of multicast groups
 * @resv_start_op: first operation for which reserved fields of the header
 *	can be validated, new families should leave this field at zero
 *	can be validated and policies are required (see below);
 *	new families should leave this field at zero
 * @mcgrp_offset: starting number of multicast group IDs in this family
 *	(private)
 * @ops: the operations supported by this family
 * @n_ops: number of operations supported by this family
 * @small_ops: the small-struct operations supported by this family
 * @n_small_ops: number of small-struct operations supported by this family
 *
 * Attribute policies (the combination of @policy and @maxattr fields)
 * can be attached at the family level or at the operation level.
 * If both are present the per-operation policy takes precedence.
 * For operations before @resv_start_op lack of policy means that the core
 * will perform no attribute parsing or validation. For newer operations
 * if policy is not provided core will reject all TLV attributes.
 */
struct genl_family {
	int id;		/* private */
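For illustration only (not part of this series): a minimal sketch of a generic netlink family under the rules described in the kernel-doc above. All FOO_*/foo_* identifiers are hypothetical. The single op carries a per-op policy (which takes precedence over any family-level policy), and resv_start_op is left at zero as the kernel-doc recommends for new families, so reserved header fields are validated on every command and an op registered without a policy would have all attributes rejected by the core.

/* Hypothetical example only: FOO_* and foo_* names are invented for this
 * sketch and are not part of the patch set above.
 */
#include <linux/module.h>
#include <net/genetlink.h>

enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_VALUE,			/* u32 */
	__FOO_ATTR_MAX,
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

enum {
	FOO_CMD_UNSPEC,
	FOO_CMD_GET,
};

static const struct nla_policy foo_get_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_VALUE] = { .type = NLA_U32 },
};

static int foo_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;			/* real handler elided */
}

static const struct genl_ops foo_nl_ops[] = {
	{
		.cmd		= FOO_CMD_GET,
		.doit		= foo_cmd_get_doit,
		/* per-op policy: takes precedence over a family-level one */
		.policy		= foo_get_policy,
		.maxattr	= FOO_ATTR_MAX,
	},
};

static struct genl_family foo_nl_family __ro_after_init = {
	.name		= "foo_example",
	.version	= 1,
	.module		= THIS_MODULE,
	.ops		= foo_nl_ops,
	.n_ops		= ARRAY_SIZE(foo_nl_ops),
	/* resv_start_op left at zero: per the kernel-doc, a new family does
	 * not set it, so every command gets the stricter handling (reserved
	 * header fields validated, missing policy => attributes rejected).
	 */
};

static int __init foo_nl_init(void)
{
	return genl_register_family(&foo_nl_family);
}
module_init(foo_nl_init);

static void __exit foo_nl_exit(void)
{
	genl_unregister_family(&foo_nl_family);
}
module_exit(foo_nl_exit);
MODULE_LICENSE("GPL");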
@ -369,18 +369,10 @@ static int __init test_rhltable(unsigned int entries)
|
||||
pr_info("test %d random rhlist add/delete operations\n", entries);
|
||||
for (j = 0; j < entries; j++) {
|
||||
u32 i = prandom_u32_max(entries);
|
||||
u32 prand = get_random_u32();
|
||||
u32 prand = prandom_u32_max(4);
|
||||
|
||||
cond_resched();
|
||||
|
||||
if (prand == 0)
|
||||
prand = get_random_u32();
|
||||
|
||||
if (prand & 1) {
|
||||
prand >>= 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
if (test_bit(i, obj_in_table)) {
|
||||
clear_bit(i, obj_in_table);
|
||||
@ -393,35 +385,29 @@ static int __init test_rhltable(unsigned int entries)
|
||||
}
|
||||
|
||||
if (prand & 1) {
|
||||
prand >>= 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
if (err == 0) {
|
||||
if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
|
||||
continue;
|
||||
} else {
|
||||
if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
|
||||
continue;
|
||||
}
|
||||
|
||||
if (prand & 1) {
|
||||
prand >>= 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
i = prandom_u32_max(entries);
|
||||
if (test_bit(i, obj_in_table)) {
|
||||
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
WARN(err, "cannot remove element at slot %d", i);
|
||||
if (err == 0)
|
||||
clear_bit(i, obj_in_table);
|
||||
} else {
|
||||
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
WARN(err, "failed to insert object %d", i);
|
||||
if (err == 0)
|
||||
set_bit(i, obj_in_table);
|
||||
if (err == 0) {
|
||||
if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
|
||||
continue;
|
||||
} else {
|
||||
if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (prand & 2) {
|
||||
i = prandom_u32_max(entries);
|
||||
if (test_bit(i, obj_in_table)) {
|
||||
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
WARN(err, "cannot remove element at slot %d", i);
|
||||
if (err == 0)
|
||||
clear_bit(i, obj_in_table);
|
||||
} else {
|
||||
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
|
||||
WARN(err, "failed to insert object %d", i);
|
||||
if (err == 0)
|
||||
set_bit(i, obj_in_table);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -342,10 +342,12 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
		__skb_unlink(do_skb, &session->skb_queue);
		/* drop ref taken in j1939_session_skb_queue() */
		skb_unref(do_skb);
		spin_unlock_irqrestore(&session->skb_queue.lock, flags);

		kfree_skb(do_skb);
	} else {
		spin_unlock_irqrestore(&session->skb_queue.lock, flags);
	}
	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
}

void j1939_session_skb_queue(struct j1939_session *session,
@@ -3971,7 +3971,7 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
	} else if (i < MAX_SKB_FRAGS) {
		skb_zcopy_downgrade_managed(skb);
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, size);
		skb_fill_page_desc_noacc(skb, i, page, offset, size);
	} else {
		return -EMSGSIZE;
	}
@@ -124,7 +124,7 @@ static int eeprom_prepare_data(const struct ethnl_req_info *req_base,
	if (ret)
		goto err_free;

	ret = get_module_eeprom_by_page(dev, &page_data, info->extack);
	ret = get_module_eeprom_by_page(dev, &page_data, info ? info->extack : NULL);
	if (ret < 0)
		goto err_ops;
@@ -502,8 +502,10 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
	if (err < 0)
		goto out;

	if (addr->family != AF_IEEE802154)
	if (addr->family != AF_IEEE802154) {
		err = -EINVAL;
		goto out;
	}

	ieee802154_addr_from_sa(&haddr, &addr->addr);
	dev = ieee802154_get_dev(sock_net(sk), &haddr);
@@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
	dev_match = dev_match || (res.type == RTN_LOCAL &&
				  dev == net->loopback_dev);
	if (dev_match) {
		ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
		ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
		return ret;
	}
	if (no_addr)
@@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
	ret = 0;
	if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
		if (res.type == RTN_UNICAST)
			ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
			ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
	}
	return ret;

@@ -1231,7 +1231,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,

	nh->fib_nh_dev = in_dev->dev;
	netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
	nh->fib_nh_scope = RT_SCOPE_LINK;
	nh->fib_nh_scope = RT_SCOPE_HOST;
	if (!netif_carrier_ok(nh->fib_nh_dev))
		nh->fib_nh_flags |= RTNH_F_LINKDOWN;
	err = 0;
@@ -2534,7 +2534,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
	} else {
		fib_nh_release(net, fib_nh);
	}
@ -1175,14 +1175,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
|
||||
dev->needed_headroom = dst_len;
|
||||
|
||||
if (set_mtu) {
|
||||
dev->mtu = rt->dst.dev->mtu - t_hlen;
|
||||
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
|
||||
dev->mtu -= 8;
|
||||
if (dev->type == ARPHRD_ETHER)
|
||||
dev->mtu -= ETH_HLEN;
|
||||
int mtu = rt->dst.dev->mtu - t_hlen;
|
||||
|
||||
if (dev->mtu < IPV6_MIN_MTU)
|
||||
dev->mtu = IPV6_MIN_MTU;
|
||||
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
|
||||
mtu -= 8;
|
||||
if (dev->type == ARPHRD_ETHER)
|
||||
mtu -= ETH_HLEN;
|
||||
|
||||
if (mtu < IPV6_MIN_MTU)
|
||||
mtu = IPV6_MIN_MTU;
|
||||
WRITE_ONCE(dev->mtu, mtu);
|
||||
}
|
||||
}
|
||||
ip6_rt_put(rt);
|
||||
|
@ -1450,8 +1450,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
|
||||
struct net_device *tdev = NULL;
|
||||
struct __ip6_tnl_parm *p = &t->parms;
|
||||
struct flowi6 *fl6 = &t->fl.u.ip6;
|
||||
unsigned int mtu;
|
||||
int t_hlen;
|
||||
int mtu;
|
||||
|
||||
__dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
|
||||
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
|
||||
@ -1498,12 +1498,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
|
||||
dev->hard_header_len = tdev->hard_header_len + t_hlen;
|
||||
mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
|
||||
|
||||
dev->mtu = mtu - t_hlen;
|
||||
mtu = mtu - t_hlen;
|
||||
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
|
||||
dev->mtu -= 8;
|
||||
mtu -= 8;
|
||||
|
||||
if (dev->mtu < IPV6_MIN_MTU)
|
||||
dev->mtu = IPV6_MIN_MTU;
|
||||
if (mtu < IPV6_MIN_MTU)
|
||||
mtu = IPV6_MIN_MTU;
|
||||
WRITE_ONCE(dev->mtu, mtu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1124,10 +1124,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)

	if (tdev && !netif_is_l3_master(tdev)) {
		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
		int mtu;

		dev->mtu = tdev->mtu - t_hlen;
		if (dev->mtu < IPV6_MIN_MTU)
			dev->mtu = IPV6_MIN_MTU;
		mtu = tdev->mtu - t_hlen;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		WRITE_ONCE(dev->mtu, mtu);
	}
}

@@ -839,7 +839,7 @@ static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
	}

	get_page(page);
	skb_fill_page_desc(skb, i, page, offset, size);
	skb_fill_page_desc_noacc(skb, i, page, offset, size);
	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

coalesced:
@@ -132,7 +132,7 @@ static int
ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
	int hlen;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct ieee802154_mac_cb *cb = mac_cb(skb);

	skb_reset_mac_header(skb);

@@ -294,8 +294,9 @@ void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);

	mac_cb(skb)->lqi = lqi;
	cb->lqi = lqi;
	skb->pkt_type = IEEE802154_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
@ -1673,6 +1673,37 @@ static void mptcp_set_nospace(struct sock *sk)
|
||||
set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
|
||||
}
|
||||
|
||||
static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
|
||||
size_t len, int *copied_syn)
|
||||
{
|
||||
unsigned int saved_flags = msg->msg_flags;
|
||||
struct mptcp_sock *msk = mptcp_sk(sk);
|
||||
int ret;
|
||||
|
||||
lock_sock(ssk);
|
||||
msg->msg_flags |= MSG_DONTWAIT;
|
||||
msk->connect_flags = O_NONBLOCK;
|
||||
msk->is_sendmsg = 1;
|
||||
ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
|
||||
msk->is_sendmsg = 0;
|
||||
msg->msg_flags = saved_flags;
|
||||
release_sock(ssk);
|
||||
|
||||
/* do the blocking bits of inet_stream_connect outside the ssk socket lock */
|
||||
if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) {
|
||||
ret = __inet_stream_connect(sk->sk_socket, msg->msg_name,
|
||||
msg->msg_namelen, msg->msg_flags, 1);
|
||||
|
||||
/* Keep the same behaviour of plain TCP: zero the copied bytes in
|
||||
* case of any error, except timeout or signal
|
||||
*/
|
||||
if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
|
||||
*copied_syn = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
{
|
||||
struct mptcp_sock *msk = mptcp_sk(sk);
|
||||
@ -1693,23 +1724,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
|
||||
ssock = __mptcp_nmpc_socket(msk);
|
||||
if (unlikely(ssock && inet_sk(ssock->sk)->defer_connect)) {
|
||||
struct sock *ssk = ssock->sk;
|
||||
int copied_syn = 0;
|
||||
|
||||
lock_sock(ssk);
|
||||
|
||||
ret = tcp_sendmsg_fastopen(ssk, msg, &copied_syn, len, NULL);
|
||||
ret = mptcp_sendmsg_fastopen(sk, ssock->sk, msg, len, &copied_syn);
|
||||
copied += copied_syn;
|
||||
if (ret == -EINPROGRESS && copied_syn > 0) {
|
||||
/* reflect the new state on the MPTCP socket */
|
||||
inet_sk_state_store(sk, inet_sk_state_load(ssk));
|
||||
release_sock(ssk);
|
||||
if (ret == -EINPROGRESS && copied_syn > 0)
|
||||
goto out;
|
||||
} else if (ret) {
|
||||
release_sock(ssk);
|
||||
else if (ret)
|
||||
goto do_error;
|
||||
}
|
||||
release_sock(ssk);
|
||||
}
|
||||
|
||||
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
||||
@ -2952,7 +2974,7 @@ static void mptcp_close(struct sock *sk, long timeout)
|
||||
sock_put(sk);
|
||||
}
|
||||
|
||||
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
|
||||
void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
||||
const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
|
||||
@ -3507,10 +3529,73 @@ static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
|
||||
return put_user(answ, (int __user *)arg);
|
||||
}
|
||||
|
||||
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
|
||||
struct mptcp_subflow_context *subflow)
|
||||
{
|
||||
subflow->request_mptcp = 0;
|
||||
__mptcp_do_fallback(msk);
|
||||
}
|
||||
|
||||
static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
|
||||
{
|
||||
struct mptcp_subflow_context *subflow;
|
||||
struct mptcp_sock *msk = mptcp_sk(sk);
|
||||
struct socket *ssock;
|
||||
int err = -EINVAL;
|
||||
|
||||
ssock = __mptcp_nmpc_socket(msk);
|
||||
if (!ssock)
|
||||
return -EINVAL;
|
||||
|
||||
mptcp_token_destroy(msk);
|
||||
inet_sk_state_store(sk, TCP_SYN_SENT);
|
||||
subflow = mptcp_subflow_ctx(ssock->sk);
|
||||
#ifdef CONFIG_TCP_MD5SIG
|
||||
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
|
||||
* TCP option space.
|
||||
*/
|
||||
if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
#endif
|
||||
if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
|
||||
MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
}
|
||||
if (likely(!__mptcp_check_fallback(msk)))
|
||||
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
|
||||
|
||||
/* if reaching here via the fastopen/sendmsg path, the caller already
|
||||
* acquired the subflow socket lock, too.
|
||||
*/
|
||||
if (msk->is_sendmsg)
|
||||
err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
|
||||
else
|
||||
err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
|
||||
inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
|
||||
|
||||
/* on successful connect, the msk state will be moved to established by
|
||||
* subflow_finish_connect()
|
||||
*/
|
||||
if (unlikely(err && err != -EINPROGRESS)) {
|
||||
inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
|
||||
return err;
|
||||
}
|
||||
|
||||
mptcp_copy_inaddrs(sk, ssock->sk);
|
||||
|
||||
/* unblocking connect, mptcp-level inet_stream_connect will error out
|
||||
* without changing the socket state, update it here.
|
||||
*/
|
||||
if (err == -EINPROGRESS)
|
||||
sk->sk_socket->state = ssock->state;
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct proto mptcp_prot = {
|
||||
.name = "MPTCP",
|
||||
.owner = THIS_MODULE,
|
||||
.init = mptcp_init_sock,
|
||||
.connect = mptcp_connect,
|
||||
.disconnect = mptcp_disconnect,
|
||||
.close = mptcp_close,
|
||||
.accept = mptcp_accept,
|
||||
@ -3562,78 +3647,16 @@ unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
|
||||
struct mptcp_subflow_context *subflow)
|
||||
{
|
||||
subflow->request_mptcp = 0;
|
||||
__mptcp_do_fallback(msk);
|
||||
}
|
||||
|
||||
static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
|
||||
int addr_len, int flags)
|
||||
{
|
||||
struct mptcp_sock *msk = mptcp_sk(sock->sk);
|
||||
struct mptcp_subflow_context *subflow;
|
||||
struct socket *ssock;
|
||||
int err = -EINVAL;
|
||||
int ret;
|
||||
|
||||
lock_sock(sock->sk);
|
||||
if (uaddr) {
|
||||
if (addr_len < sizeof(uaddr->sa_family))
|
||||
goto unlock;
|
||||
|
||||
if (uaddr->sa_family == AF_UNSPEC) {
|
||||
err = mptcp_disconnect(sock->sk, flags);
|
||||
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
|
||||
if (sock->state != SS_UNCONNECTED && msk->subflow) {
|
||||
/* pending connection or invalid state, let existing subflow
|
||||
* cope with that
|
||||
*/
|
||||
ssock = msk->subflow;
|
||||
goto do_connect;
|
||||
}
|
||||
|
||||
ssock = __mptcp_nmpc_socket(msk);
|
||||
if (!ssock)
|
||||
goto unlock;
|
||||
|
||||
mptcp_token_destroy(msk);
|
||||
inet_sk_state_store(sock->sk, TCP_SYN_SENT);
|
||||
subflow = mptcp_subflow_ctx(ssock->sk);
|
||||
#ifdef CONFIG_TCP_MD5SIG
|
||||
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
|
||||
* TCP option space.
|
||||
*/
|
||||
if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
#endif
|
||||
if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
|
||||
MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
}
|
||||
if (likely(!__mptcp_check_fallback(msk)))
|
||||
MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);
|
||||
|
||||
do_connect:
|
||||
err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
|
||||
inet_sk(sock->sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
|
||||
sock->state = ssock->state;
|
||||
|
||||
/* on successful connect, the msk state will be moved to established by
|
||||
* subflow_finish_connect()
|
||||
*/
|
||||
if (!err || err == -EINPROGRESS)
|
||||
mptcp_copy_inaddrs(sock->sk, ssock->sk);
|
||||
else
|
||||
inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
|
||||
|
||||
unlock:
|
||||
mptcp_sk(sock->sk)->connect_flags = flags;
|
||||
ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
|
||||
release_sock(sock->sk);
|
||||
return err;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mptcp_listen(struct socket *sock, int backlog)
|
||||
@ -3699,7 +3722,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
|
||||
if (mptcp_is_fully_established(newsk))
|
||||
mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
|
||||
|
||||
mptcp_copy_inaddrs(newsk, msk->first);
|
||||
mptcp_rcv_space_init(msk, msk->first);
|
||||
mptcp_propagate_sndbuf(newsk, msk->first);
|
||||
|
||||
|
@ -285,7 +285,9 @@ struct mptcp_sock {
|
||||
u8 mpc_endpoint_id;
|
||||
u8 recvmsg_inq:1,
|
||||
cork:1,
|
||||
nodelay:1;
|
||||
nodelay:1,
|
||||
is_sendmsg:1;
|
||||
int connect_flags;
|
||||
struct work_struct work;
|
||||
struct sk_buff *ooo_last_skb;
|
||||
struct rb_root out_of_order_queue;
|
||||
@ -599,6 +601,7 @@ int mptcp_is_checksum_enabled(const struct net *net);
|
||||
int mptcp_allow_join_id0(const struct net *net);
|
||||
unsigned int mptcp_stale_loss_cnt(const struct net *net);
|
||||
int mptcp_get_pm_type(const struct net *net);
|
||||
void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk);
|
||||
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
|
||||
struct mptcp_options_received *mp_opt);
|
||||
bool __mptcp_retransmit_pending_data(struct sock *sk);
|
||||
|
@ -723,6 +723,8 @@ create_child:
|
||||
goto dispose_child;
|
||||
}
|
||||
|
||||
if (new_msk)
|
||||
mptcp_copy_inaddrs(new_msk, child);
|
||||
subflow_drop_ctx(child);
|
||||
goto out;
|
||||
}
|
||||
@ -750,6 +752,11 @@ create_child:
|
||||
ctx->conn = new_msk;
|
||||
new_msk = NULL;
|
||||
|
||||
/* set msk addresses early to ensure mptcp_pm_get_local_id()
|
||||
* uses the correct data
|
||||
*/
|
||||
mptcp_copy_inaddrs(ctx->conn, child);
|
||||
|
||||
/* with OoO packets we can reach here without ingress
|
||||
* mpc option
|
||||
*/
|
||||
|
@@ -78,10 +78,29 @@ static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
static unsigned long *mc_groups = &mc_group_start;
static unsigned long mc_groups_longs = 1;

/* We need the last attribute with non-zero ID therefore a 2-entry array */
static struct nla_policy genl_policy_reject_all[] = {
	{ .type = NLA_REJECT },
	{ .type = NLA_REJECT },
};

static int genl_ctrl_event(int event, const struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);

static void
genl_op_fill_in_reject_policy(const struct genl_family *family,
			      struct genl_ops *op)
{
	BUILD_BUG_ON(ARRAY_SIZE(genl_policy_reject_all) - 1 != 1);

	if (op->policy || op->cmd < family->resv_start_op)
		return;

	op->policy = genl_policy_reject_all;
	op->maxattr = 1;
}

static const struct genl_family *genl_family_find_byid(unsigned int id)
{
	return idr_find(&genl_fam_idr, id);
@@ -113,6 +132,8 @@ static void genl_op_from_full(const struct genl_family *family,
	op->maxattr = family->maxattr;
	if (!op->policy)
		op->policy = family->policy;

	genl_op_fill_in_reject_policy(family, op);
}

static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
@@ -142,6 +163,8 @@ static void genl_op_from_small(const struct genl_family *family,

	op->maxattr = family->maxattr;
	op->policy = family->policy;

	genl_op_fill_in_reject_policy(family, op);
}

static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
@@ -357,6 +380,8 @@ static int genl_validate_ops(const struct genl_family *family)
	genl_get_cmd_by_index(i, family, &op);
	if (op.dumpit == NULL && op.doit == NULL)
		return -EINVAL;
	if (WARN_ON(op.cmd >= family->resv_start_op && op.validate))
		return -EINVAL;
	for (j = i + 1; j < genl_get_cmd_cnt(family); j++) {
		struct genl_ops op2;

@@ -1616,7 +1616,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb,
	if (IS_ERR(dp))
		return;

	WARN(dp->user_features, "Dropping previously announced user features\n");
	pr_warn("%s: Dropping previously announced user features\n",
		ovs_dp_name(dp));
	dp->user_features = 0;
}

@@ -49,6 +49,7 @@ TARGETS += net
TARGETS += net/af_unix
TARGETS += net/forwarding
TARGETS += net/mptcp
TARGETS += net/openvswitch
TARGETS += netfilter
TARGETS += nsfs
TARGETS += pidfd
13	tools/testing/selftests/net/openvswitch/Makefile	Normal file
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0

top_srcdir = ../../../../..

CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)

TEST_PROGS := openvswitch.sh

TEST_FILES := ovs-dpctl.py

EXTRA_CLEAN := test_netlink_checks

include ../../lib.mk
218	tools/testing/selftests/net/openvswitch/openvswitch.sh	Executable file
@@ -0,0 +1,218 @@
|
||||
#!/bin/sh
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
#
|
||||
# OVS kernel module self tests
|
||||
|
||||
# Kselftest framework requirement - SKIP code is 4.
|
||||
ksft_skip=4
|
||||
|
||||
PAUSE_ON_FAIL=no
|
||||
VERBOSE=0
|
||||
TRACING=0
|
||||
|
||||
tests="
|
||||
netlink_checks ovsnl: validate netlink attrs and settings"
|
||||
|
||||
info() {
|
||||
[ $VERBOSE = 0 ] || echo $*
|
||||
}
|
||||
|
||||
ovs_base=`pwd`
|
||||
sbxs=
|
||||
sbx_add () {
|
||||
info "adding sandbox '$1'"
|
||||
|
||||
sbxs="$sbxs $1"
|
||||
|
||||
NO_BIN=0
|
||||
|
||||
# Create sandbox.
|
||||
local d="$ovs_base"/$1
|
||||
if [ -e $d ]; then
|
||||
info "removing $d"
|
||||
rm -rf "$d"
|
||||
fi
|
||||
mkdir "$d" || return 1
|
||||
ovs_setenv $1
|
||||
}
|
||||
|
||||
ovs_exit_sig() {
|
||||
[ -e ${ovs_dir}/cleanup ] && . "$ovs_dir/cleanup"
|
||||
}
|
||||
|
||||
on_exit() {
|
||||
echo "$1" > ${ovs_dir}/cleanup.tmp
|
||||
cat ${ovs_dir}/cleanup >> ${ovs_dir}/cleanup.tmp
|
||||
mv ${ovs_dir}/cleanup.tmp ${ovs_dir}/cleanup
|
||||
}
|
||||
|
||||
ovs_setenv() {
|
||||
sandbox=$1
|
||||
|
||||
ovs_dir=$ovs_base${1:+/$1}; export ovs_dir
|
||||
|
||||
test -e ${ovs_dir}/cleanup || : > ${ovs_dir}/cleanup
|
||||
}
|
||||
|
||||
ovs_sbx() {
|
||||
if test "X$2" != X; then
|
||||
(ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log)
|
||||
else
|
||||
ovs_setenv $1
|
||||
fi
|
||||
}
|
||||
|
||||
ovs_add_dp () {
|
||||
info "Adding DP/Bridge IF: sbx:$1 dp:$2 {$3, $4, $5}"
|
||||
sbxname="$1"
|
||||
shift
|
||||
ovs_sbx "$sbxname" python3 $ovs_base/ovs-dpctl.py add-dp $*
|
||||
on_exit "ovs_sbx $sbxname python3 $ovs_base/ovs-dpctl.py del-dp $1;"
|
||||
}
|
||||
|
||||
usage() {
|
||||
echo
|
||||
echo "$0 [OPTIONS] [TEST]..."
|
||||
echo "If no TEST argument is given, all tests will be run."
|
||||
echo
|
||||
echo "Options"
|
||||
echo " -t: capture traffic via tcpdump"
|
||||
echo " -v: verbose"
|
||||
echo " -p: pause on failure"
|
||||
echo
|
||||
echo "Available tests${tests}"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# netlink_validation
|
||||
# - Create a dp
|
||||
# - check no warning with "old version" simulation
|
||||
test_netlink_checks () {
|
||||
sbx_add "test_netlink_checks" || return 1
|
||||
|
||||
info "setting up new DP"
|
||||
ovs_add_dp "test_netlink_checks" nv0 || return 1
|
||||
# now try again
|
||||
PRE_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
|
||||
ovs_add_dp "test_netlink_checks" nv0 -V 0 || return 1
|
||||
POST_TEST=$(dmesg | grep -E "RIP: [0-9a-fA-Fx]+:ovs_dp_cmd_new\+")
|
||||
if [ "$PRE_TEST" != "$POST_TEST" ]; then
|
||||
info "failed - gen warning"
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
run_test() {
|
||||
(
|
||||
tname="$1"
|
||||
tdesc="$2"
|
||||
|
||||
if ! lsmod | grep openvswitch >/dev/null 2>&1; then
|
||||
stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}"
|
||||
return $ksft_skip
|
||||
fi
|
||||
|
||||
if python3 ovs-dpctl.py -h 2>&1 | \
|
||||
grep "Need to install the python" >/dev/null 2>&1; then
|
||||
stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
|
||||
return $ksft_skip
|
||||
fi
|
||||
printf "TEST: %-60s [START]\n" "${tname}"
|
||||
|
||||
unset IFS
|
||||
|
||||
eval test_${tname}
|
||||
ret=$?
|
||||
|
||||
if [ $ret -eq 0 ]; then
|
||||
printf "TEST: %-60s [ OK ]\n" "${tdesc}"
|
||||
ovs_exit_sig
|
||||
rm -rf "$ovs_dir"
|
||||
elif [ $ret -eq 1 ]; then
|
||||
printf "TEST: %-60s [FAIL]\n" "${tdesc}"
|
||||
if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
|
||||
echo
|
||||
echo "Pausing. Logs in $ovs_dir/. Hit enter to continue"
|
||||
read a
|
||||
fi
|
||||
ovs_exit_sig
|
||||
[ "${PAUSE_ON_FAIL}" = "yes" ] || rm -rf "$ovs_dir"
|
||||
exit 1
|
||||
elif [ $ret -eq $ksft_skip ]; then
|
||||
printf "TEST: %-60s [SKIP]\n" "${tdesc}"
|
||||
elif [ $ret -eq 2 ]; then
|
||||
rm -rf test_${tname}
|
||||
run_test "$1" "$2"
|
||||
fi
|
||||
|
||||
return $ret
|
||||
)
|
||||
ret=$?
|
||||
case $ret in
|
||||
0)
|
||||
[ $all_skipped = true ] && [ $exitcode=$ksft_skip ] && exitcode=0
|
||||
all_skipped=false
|
||||
;;
|
||||
$ksft_skip)
|
||||
[ $all_skipped = true ] && exitcode=$ksft_skip
|
||||
;;
|
||||
*)
|
||||
all_skipped=false
|
||||
exitcode=1
|
||||
;;
|
||||
esac
|
||||
|
||||
return $ret
|
||||
}
|
||||
|
||||
|
||||
exitcode=0
|
||||
desc=0
|
||||
all_skipped=true
|
||||
|
||||
while getopts :pvt o
|
||||
do
|
||||
case $o in
|
||||
p) PAUSE_ON_FAIL=yes;;
|
||||
v) VERBOSE=1;;
|
||||
t) if which tcpdump > /dev/null 2>&1; then
|
||||
TRACING=1
|
||||
else
|
||||
echo "=== tcpdump not available, tracing disabled"
|
||||
fi
|
||||
;;
|
||||
*) usage;;
|
||||
esac
|
||||
done
|
||||
shift $(($OPTIND-1))
|
||||
|
||||
IFS="
|
||||
"
|
||||
|
||||
for arg do
|
||||
# Check first that all requested tests are available before running any
|
||||
command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
|
||||
done
|
||||
|
||||
name=""
|
||||
desc=""
|
||||
for t in ${tests}; do
|
||||
[ "${name}" = "" ] && name="${t}" && continue
|
||||
[ "${desc}" = "" ] && desc="${t}"
|
||||
|
||||
run_this=1
|
||||
for arg do
|
||||
[ "${arg}" != "${arg#--*}" ] && continue
|
||||
[ "${arg}" = "${name}" ] && run_this=1 && break
|
||||
run_this=0
|
||||
done
|
||||
if [ $run_this -eq 1 ]; then
|
||||
run_test "${name}" "${desc}"
|
||||
fi
|
||||
name=""
|
||||
desc=""
|
||||
done
|
||||
|
||||
exit ${exitcode}
|
351	tools/testing/selftests/net/openvswitch/ovs-dpctl.py	Normal file
@@ -0,0 +1,351 @@
|
||||
#!/usr/bin/env python3
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
# Controls the openvswitch module. Part of the kselftest suite, but
|
||||
# can be used for some diagnostic purpose as well.
|
||||
|
||||
import argparse
|
||||
import errno
|
||||
import sys
|
||||
|
||||
try:
|
||||
from pyroute2 import NDB
|
||||
|
||||
from pyroute2.netlink import NLM_F_ACK
|
||||
from pyroute2.netlink import NLM_F_REQUEST
|
||||
from pyroute2.netlink import genlmsg
|
||||
from pyroute2.netlink import nla
|
||||
from pyroute2.netlink.exceptions import NetlinkError
|
||||
from pyroute2.netlink.generic import GenericNetlinkSocket
|
||||
except ModuleNotFoundError:
|
||||
print("Need to install the python pyroute2 package.")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
OVS_DATAPATH_FAMILY = "ovs_datapath"
|
||||
OVS_VPORT_FAMILY = "ovs_vport"
|
||||
OVS_FLOW_FAMILY = "ovs_flow"
|
||||
OVS_PACKET_FAMILY = "ovs_packet"
|
||||
OVS_METER_FAMILY = "ovs_meter"
|
||||
OVS_CT_LIMIT_FAMILY = "ovs_ct_limit"
|
||||
|
||||
OVS_DATAPATH_VERSION = 2
|
||||
OVS_DP_CMD_NEW = 1
|
||||
OVS_DP_CMD_DEL = 2
|
||||
OVS_DP_CMD_GET = 3
|
||||
OVS_DP_CMD_SET = 4
|
||||
|
||||
OVS_VPORT_CMD_NEW = 1
|
||||
OVS_VPORT_CMD_DEL = 2
|
||||
OVS_VPORT_CMD_GET = 3
|
||||
OVS_VPORT_CMD_SET = 4
|
||||
|
||||
|
||||
class ovs_dp_msg(genlmsg):
|
||||
# include the OVS version
|
||||
# We need a custom header rather than just being able to rely on
|
||||
# genlmsg because fields ends up not expressing everything correctly
|
||||
# if we use the canonical example of setting fields = (('customfield',),)
|
||||
fields = genlmsg.fields + (("dpifindex", "I"),)
|
||||
|
||||
|
||||
class OvsDatapath(GenericNetlinkSocket):
|
||||
|
||||
OVS_DP_F_VPORT_PIDS = 1 << 1
|
||||
OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3
|
||||
|
||||
class dp_cmd_msg(ovs_dp_msg):
|
||||
"""
|
||||
Message class that will be used to communicate with the kernel module.
|
||||
"""
|
||||
|
||||
nla_map = (
|
||||
("OVS_DP_ATTR_UNSPEC", "none"),
|
||||
("OVS_DP_ATTR_NAME", "asciiz"),
|
||||
("OVS_DP_ATTR_UPCALL_PID", "uint32"),
|
||||
("OVS_DP_ATTR_STATS", "dpstats"),
|
||||
("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
|
||||
("OVS_DP_ATTR_USER_FEATURES", "uint32"),
|
||||
("OVS_DP_ATTR_PAD", "none"),
|
||||
("OVS_DP_ATTR_MASKS_CACHE_SIZE", "uint32"),
|
||||
("OVS_DP_ATTR_PER_CPU_PIDS", "array(uint32)"),
|
||||
)
|
||||
|
||||
class dpstats(nla):
|
||||
fields = (
|
||||
("hit", "=Q"),
|
||||
("missed", "=Q"),
|
||||
("lost", "=Q"),
|
||||
("flows", "=Q"),
|
||||
)
|
||||
|
||||
class megaflowstats(nla):
|
||||
fields = (
|
||||
("mask_hit", "=Q"),
|
||||
("masks", "=I"),
|
||||
("padding", "=I"),
|
||||
("cache_hits", "=Q"),
|
||||
("pad1", "=Q"),
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
GenericNetlinkSocket.__init__(self)
|
||||
self.bind(OVS_DATAPATH_FAMILY, OvsDatapath.dp_cmd_msg)
|
||||
|
||||
def info(self, dpname, ifindex=0):
|
||||
msg = OvsDatapath.dp_cmd_msg()
|
||||
msg["cmd"] = OVS_DP_CMD_GET
|
||||
msg["version"] = OVS_DATAPATH_VERSION
|
||||
msg["reserved"] = 0
|
||||
msg["dpifindex"] = ifindex
|
||||
msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
|
||||
|
||||
try:
|
||||
reply = self.nlm_request(
|
||||
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
|
||||
)
|
||||
reply = reply[0]
|
||||
except NetlinkError as ne:
|
||||
if ne.code == errno.ENODEV:
|
||||
reply = None
|
||||
else:
|
||||
raise ne
|
||||
|
||||
return reply
|
||||
|
||||
def create(self, dpname, shouldUpcall=False, versionStr=None):
|
||||
msg = OvsDatapath.dp_cmd_msg()
|
||||
msg["cmd"] = OVS_DP_CMD_NEW
|
||||
if versionStr is None:
|
||||
msg["version"] = OVS_DATAPATH_VERSION
|
||||
else:
|
||||
msg["version"] = int(versionStr.split(":")[0], 0)
|
||||
msg["reserved"] = 0
|
||||
msg["dpifindex"] = 0
|
||||
msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
|
||||
|
||||
dpfeatures = 0
|
||||
if versionStr is not None and versionStr.find(":") != -1:
|
||||
dpfeatures = int(versionStr.split(":")[1], 0)
|
||||
else:
|
||||
dpfeatures = OvsDatapath.OVS_DP_F_VPORT_PIDS
|
||||
|
||||
msg["attrs"].append(["OVS_DP_ATTR_USER_FEATURES", dpfeatures])
|
||||
if not shouldUpcall:
|
||||
msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", 0])
|
||||
|
||||
try:
|
||||
reply = self.nlm_request(
|
||||
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
|
||||
)
|
||||
reply = reply[0]
|
||||
except NetlinkError as ne:
|
||||
if ne.code == errno.EEXIST:
|
||||
reply = None
|
||||
else:
|
||||
raise ne
|
||||
|
||||
return reply
|
||||
|
||||
def destroy(self, dpname):
|
||||
msg = OvsDatapath.dp_cmd_msg()
|
||||
msg["cmd"] = OVS_DP_CMD_DEL
|
||||
msg["version"] = OVS_DATAPATH_VERSION
|
||||
msg["reserved"] = 0
|
||||
msg["dpifindex"] = 0
|
||||
msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])
|
||||
|
||||
try:
|
||||
reply = self.nlm_request(
|
||||
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
|
||||
)
|
||||
reply = reply[0]
|
||||
except NetlinkError as ne:
|
||||
if ne.code == errno.ENODEV:
|
||||
reply = None
|
||||
else:
|
||||
raise ne
|
||||
|
||||
return reply
|
||||
|
||||
|
||||
class OvsVport(GenericNetlinkSocket):
|
||||
class ovs_vport_msg(ovs_dp_msg):
|
||||
nla_map = (
|
||||
("OVS_VPORT_ATTR_UNSPEC", "none"),
|
||||
("OVS_VPORT_ATTR_PORT_NO", "uint32"),
|
||||
("OVS_VPORT_ATTR_TYPE", "uint32"),
|
||||
("OVS_VPORT_ATTR_NAME", "asciiz"),
|
||||
("OVS_VPORT_ATTR_OPTIONS", "none"),
|
||||
("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
|
||||
("OVS_VPORT_ATTR_STATS", "vportstats"),
|
||||
("OVS_VPORT_ATTR_PAD", "none"),
|
||||
("OVS_VPORT_ATTR_IFINDEX", "uint32"),
|
||||
("OVS_VPORT_ATTR_NETNSID", "uint32"),
|
||||
)
|
||||
|
||||
class vportstats(nla):
|
||||
fields = (
|
||||
("rx_packets", "=Q"),
|
||||
("tx_packets", "=Q"),
|
||||
("rx_bytes", "=Q"),
|
||||
("tx_bytes", "=Q"),
|
||||
("rx_errors", "=Q"),
|
||||
("tx_errors", "=Q"),
|
||||
("rx_dropped", "=Q"),
|
||||
("tx_dropped", "=Q"),
|
||||
)
|
||||
|
||||
def type_to_str(vport_type):
|
||||
if vport_type == 1:
|
||||
return "netdev"
|
||||
elif vport_type == 2:
|
||||
return "internal"
|
||||
elif vport_type == 3:
|
||||
return "gre"
|
||||
elif vport_type == 4:
|
||||
return "vxlan"
|
||||
elif vport_type == 5:
|
||||
return "geneve"
|
||||
return "unknown:%d" % vport_type
|
||||
|
||||
def __init__(self):
|
||||
GenericNetlinkSocket.__init__(self)
|
||||
self.bind(OVS_VPORT_FAMILY, OvsVport.ovs_vport_msg)
|
||||
|
||||
def info(self, vport_name, dpifindex=0, portno=None):
|
||||
msg = OvsVport.ovs_vport_msg()
|
||||
|
||||
msg["cmd"] = OVS_VPORT_CMD_GET
|
||||
msg["version"] = OVS_DATAPATH_VERSION
|
||||
msg["reserved"] = 0
|
||||
msg["dpifindex"] = dpifindex
|
||||
|
||||
if portno is None:
|
||||
msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_name])
|
||||
else:
|
||||
msg["attrs"].append(["OVS_VPORT_ATTR_PORT_NO", portno])
|
||||
|
||||
try:
|
||||
reply = self.nlm_request(
|
||||
msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
|
||||
)
|
||||
reply = reply[0]
|
||||
except NetlinkError as ne:
|
||||
if ne.code == errno.ENODEV:
|
||||
reply = None
|
||||
else:
|
||||
raise ne
|
||||
return reply
|
||||
|
||||
|
||||
def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()):
|
||||
dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME")
|
||||
base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS")
|
||||
megaflow_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_MEGAFLOW_STATS")
|
||||
user_features = dp_lookup_rep.get_attr("OVS_DP_ATTR_USER_FEATURES")
|
||||
masks_cache_size = dp_lookup_rep.get_attr("OVS_DP_ATTR_MASKS_CACHE_SIZE")
|
||||
|
||||
print("%s:" % dp_name)
|
||||
print(
|
||||
" lookups: hit:%d missed:%d lost:%d"
|
||||
% (base_stats["hit"], base_stats["missed"], base_stats["lost"])
|
||||
)
|
||||
print(" flows:%d" % base_stats["flows"])
|
||||
pkts = base_stats["hit"] + base_stats["missed"]
|
||||
avg = (megaflow_stats["mask_hit"] / pkts) if pkts != 0 else 0.0
|
||||
print(
|
||||
" masks: hit:%d total:%d hit/pkt:%f"
|
||||
% (megaflow_stats["mask_hit"], megaflow_stats["masks"], avg)
|
||||
)
|
||||
print(" caches:")
|
||||
print(" masks-cache: size:%d" % masks_cache_size)
|
||||
|
||||
if user_features is not None:
|
||||
print(" features: 0x%X" % user_features)
|
||||
|
||||
# port print out
|
||||
vpl = OvsVport()
|
||||
for iface in ndb.interfaces:
|
||||
rep = vpl.info(iface.ifname, ifindex)
|
||||
if rep is not None:
|
||||
print(
|
||||
" port %d: %s (%s)"
|
||||
% (
|
||||
rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
|
||||
rep.get_attr("OVS_VPORT_ATTR_NAME"),
|
||||
OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def main(argv):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="count",
|
||||
help="Increment 'verbose' output counter.",
|
||||
)
|
||||
subparsers = parser.add_subparsers()
|
||||
|
||||
showdpcmd = subparsers.add_parser("show")
|
||||
showdpcmd.add_argument(
|
||||
"showdp", metavar="N", type=str, nargs="?", help="Datapath Name"
|
||||
)
|
||||
|
||||
adddpcmd = subparsers.add_parser("add-dp")
|
||||
adddpcmd.add_argument("adddp", help="Datapath Name")
|
||||
adddpcmd.add_argument(
|
||||
"-u",
|
||||
"--upcall",
|
||||
action="store_true",
|
||||
help="Leave open a reader for upcalls",
|
||||
)
|
||||
adddpcmd.add_argument(
|
||||
"-V",
|
||||
"--versioning",
|
||||
required=False,
|
||||
help="Specify a custom version / feature string",
|
||||
)
|
||||
|
||||
deldpcmd = subparsers.add_parser("del-dp")
|
||||
deldpcmd.add_argument("deldp", help="Datapath Name")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
ovsdp = OvsDatapath()
|
||||
ndb = NDB()
|
||||
|
||||
if hasattr(args, "showdp"):
|
||||
found = False
|
||||
for iface in ndb.interfaces:
|
||||
rep = None
|
||||
if args.showdp is None:
|
||||
rep = ovsdp.info(iface.ifname, 0)
|
||||
elif args.showdp == iface.ifname:
|
||||
rep = ovsdp.info(iface.ifname, 0)
|
||||
|
||||
if rep is not None:
|
||||
found = True
|
||||
print_ovsdp_full(rep, iface.index, ndb)
|
||||
|
||||
if not found:
|
||||
msg = "No DP found"
|
||||
if args.showdp is not None:
|
||||
msg += ":'%s'" % args.showdp
|
||||
print(msg)
|
||||
elif hasattr(args, "adddp"):
|
||||
rep = ovsdp.create(args.adddp, args.upcall, args.versioning)
|
||||
if rep is None:
|
||||
print("DP '%s' already exists" % args.adddp)
|
||||
else:
|
||||
print("DP '%s' added" % args.adddp)
|
||||
elif hasattr(args, "deldp"):
|
||||
ovsdp.destroy(args.deldp)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main(sys.argv))
|