Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix excessive stack usage in cxgb4, from Arnd Bergmann.

 2) Missing skb queue lock init in tipc, from Chris Packham.

 3) Fix some regressions in ipv6 flow label handling, from Eric Dumazet.

 4) Elide flow dissection of local packets in FIB rules, from Petar Penkov.

 5) Fix TLS support build failure in mlx5, from Tariq Toukan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (36 commits)
  ppp: mppe: Revert "ppp: mppe: Add softdep to arc4"
  net: dsa: qca8k: replace legacy gpio include
  net: hisilicon: Use devm_platform_ioremap_resource
  cxgb4: reduce kernel stack usage in cudbg_collect_mem_region()
  tipc: ensure head->lock is initialised
  tc-tests: updated skbedit tests
  nfp: flower: ensure ip protocol is specified for L4 matches
  nfp: flower: fix ethernet check on match fields
  net/mlx5e: Provide cb_list pointer when setting up tc block on rep
  net: phy: make exported variables non-static
  net: sched: Fix NULL-pointer dereference in tc_indr_block_ing_cmd()
  davinci_cpdma: don't cast dma_addr_t to pointer
  net: openvswitch: do not update max_headroom if new headroom is equal to old headroom
  net/mlx5e: Convert single case statement switch statements into if statements
  net/mlx5: E-Switch, Reduce ingress acl modify metadata stack usage
  net/mlx5e: Fix unused variable warning when CONFIG_MLX5_ESWITCH is off
  net/mlx5e: Fix compilation error in TLS code
  ipv6: fix static key imbalance in fl_create()
  ipv6: fix potential crash in ip6_datagram_dst_update()
  ipv6: tcp: fix flowlabels reflection for RST packets
  ...
commit d12109291c
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
@@ -14,7 +14,7 @@
 #include <linux/of_platform.h>
 #include <linux/if_bridge.h>
 #include <linux/mdio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "qca8k.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1054,14 +1054,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 	}
 }
 
-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
-				    struct cudbg_buffer *dbg_buff,
-				    struct cudbg_error *cudbg_err,
-				    u8 mem_type)
+static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+					   struct cudbg_error *cudbg_err,
+					   u8 mem_type)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_meminfo mem_info;
-	unsigned long size;
 	u8 mc_idx;
 	int rc;
 
@@ -1075,7 +1073,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 	if (rc)
 		return rc;
 
-	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+}
+
+static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+				    struct cudbg_buffer *dbg_buff,
+				    struct cudbg_error *cudbg_err,
+				    u8 mem_type)
+{
+	unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+
 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
 				 cudbg_err);
 }
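The cxgb4 change above is a stack-size fix with a reusable shape: a large local (struct cudbg_meminfo) used to sit in cudbg_collect_mem_region()'s frame for the whole collection call chain; moving the lookup into its own function confines the big object to a short-lived frame. A minimal sketch of the idea, with hypothetical names:

    /* Before: big_buf inflates collect()'s frame for the whole call chain. */
    static int collect(void)
    {
            struct big_lookup_table big_buf;        /* large object */
            unsigned long size = lookup_size(&big_buf);

            return do_collect(size);                /* deep calls continue */
    }

    /* After: the large object lives only while the helper runs. */
    static unsigned long helper_size(void)
    {
            struct big_lookup_table big_buf;        /* released on return */

            return lookup_size(&big_buf);
    }

    static int collect(void)
    {
            return do_collect(helper_size());
    }

The saving holds only while the helper stays out of line; compilers usually keep such calls separate, and a noinline annotation would make it explicit.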
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -899,7 +899,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	struct of_phandle_args arg;
 	struct net_device *ndev;
 	struct hip04_priv *priv;
-	struct resource *res;
 	int irq;
 	int ret;
 
@@ -912,16 +911,14 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(d, res);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
 		goto init_fail;
 	}
 
 #if defined(CONFIG_HI13X1_GMAC)
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	priv->sysctrl_base = devm_ioremap_resource(d, res);
+	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
 	if (IS_ERR(priv->sysctrl_base)) {
 		ret = PTR_ERR(priv->sysctrl_base);
 		goto init_fail;
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -781,7 +781,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
-	struct resource *res;
 	struct net_device *ndev;
 	struct hisi_femac_priv *priv;
 	struct phy_device *phy;
@@ -799,15 +798,13 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
 	priv->dev = dev;
 	priv->ndev = ndev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->port_base = devm_ioremap_resource(dev, res);
+	priv->port_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->port_base)) {
 		ret = PTR_ERR(priv->port_base);
 		goto out_free_netdev;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	priv->glb_base = devm_ioremap_resource(dev, res);
+	priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
 	if (IS_ERR(priv->glb_base)) {
 		ret = PTR_ERR(priv->glb_base);
 		goto out_free_netdev;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1097,7 +1097,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
 	const struct of_device_id *of_id = NULL;
 	struct net_device *ndev;
 	struct hix5hd2_priv *priv;
-	struct resource *res;
 	struct mii_bus *bus;
 	const char *mac_addr;
 	int ret;
@@ -1119,15 +1118,13 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
 	}
 	priv->hw_cap = (unsigned long)of_id->data;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(dev, res);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
 		goto out_free_netdev;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	priv->ctrl_base = devm_ioremap_resource(dev, res);
+	priv->ctrl_base = devm_platform_ioremap_resource(pdev, 1);
 	if (IS_ERR(priv->ctrl_base)) {
 		ret = PTR_ERR(priv->ctrl_base);
 		goto out_free_netdev;
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -417,7 +417,6 @@ static int hns_mdio_probe(struct platform_device *pdev)
 {
 	struct hns_mdio_device *mdio_dev;
 	struct mii_bus *new_bus;
-	struct resource *res;
 	int ret = -ENODEV;
 
 	if (!pdev) {
@@ -442,8 +441,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
 	new_bus->priv = mdio_dev;
 	new_bus->parent = &pdev->dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
+	mdio_dev->vbase = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mdio_dev->vbase)) {
 		ret = PTR_ERR(mdio_dev->vbase);
 		return ret;
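All four hisilicon conversions above follow the same shape: devm_platform_ioremap_resource() wraps the platform_get_resource() + devm_ioremap_resource() pair into one call. A minimal sketch of the pattern in a hypothetical probe function:

    static int foo_probe(struct platform_device *pdev)
    {
            void __iomem *base;

            /* Fetches IORESOURCE_MEM index 0 and ioremaps it, device-managed. */
            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... use base ... */
            return 0;
    }

Besides being shorter, the wrapper makes the struct resource *res locals unnecessary, which is why each probe function above also drops a declaration.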
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -723,7 +723,7 @@ struct mtk_soc_data {
 #define MTK_MAX_DEVS			2
 
 #define MTK_SGMII_PHYSPEED_AN		BIT(31)
-#define MTK_SGMII_PHYSPEED_MASK		GENMASK(0, 2)
+#define MTK_SGMII_PHYSPEED_MASK		GENMASK(2, 0)
 #define MTK_SGMII_PHYSPEED_1000		BIT(0)
 #define MTK_SGMII_PHYSPEED_2500		BIT(1)
 #define MTK_HAS_FLAGS(flags, _x)	(((flags) & (_x)) == (_x))
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -82,7 +82,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
 		return -EINVAL;
 
 	regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
-	val &= ~GENMASK(2, 3);
+	val &= ~GENMASK(3, 2);
 	mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
 	val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
 	regmap_write(ss->regmap[id], ss->ana_rgc3, val);
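The two mediatek fixes above (and the stmmac ones further down) are all the same bug class: GENMASK(h, l) takes the high bit first, then the low bit, and produces a mask with bits l..h set. With the arguments swapped the result is an empty or nonsensical mask. For example:

    #include <linux/bits.h>

    #define OK_MASK  GENMASK(2, 0)   /* bits 2..0 set: 0b111 == 0x7 */

    /* Swapped arguments are wrong; newer kernels reject this at compile
     * time via GENMASK_INPUT_CHECK():
     *   #define BAD_MASK GENMASK(0, 2)
     */

Since MTK_SGMII_PHYSPEED_1000 is BIT(0) and MTK_SGMII_PHYSPEED_2500 is BIT(1), the intended speed mask must cover the low bits, i.e. GENMASK(2, 0).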
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -81,7 +81,6 @@ mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
 			  struct tls_crypto_info *crypto_info) { return false; }
 #endif
 
-#ifdef CONFIG_MLX5_FPGA_TLS
 enum {
 	MLX5_ACCEL_TLS_TX = BIT(0),
 	MLX5_ACCEL_TLS_RX = BIT(1),
@@ -103,6 +102,7 @@ struct mlx5_ifc_tls_flow_bits {
 	u8 reserved_at_2[0x1e];
 };
 
+#ifdef CONFIG_MLX5_FPGA_TLS
 int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
 			    struct tls_crypto_info *crypto_info,
 			    u32 start_offload_tcp_sn, u32 *p_swid,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -305,6 +305,7 @@ enum {
 	MLX5E_RQ_STATE_ENABLED,
 	MLX5E_RQ_STATE_AM,
 	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
+	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
 };
 
 struct mlx5e_cq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -142,22 +142,20 @@ static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
 {
 	struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
 	u32 eqe_count;
-	int ret;
 
 	netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
 		   eq->core.eqn, eq->core.cons_index, eq->core.irqn);
 
 	eqe_count = mlx5_eq_poll_irq_disabled(eq);
-	ret = eqe_count ? false : true;
 	if (!eqe_count) {
 		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-		return ret;
+		return -EIO;
 	}
 
 	netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
 		   eqe_count, eq->core.eqn);
 	sq->channel->stats->eq_rearm++;
-	return ret;
+	return 0;
 }
 
 int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
@@ -264,13 +262,13 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
 
 		err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
 		if (err)
-			break;
+			goto unlock;
 
 		err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
 							      state,
 							      netif_xmit_stopped(sq->txq));
 		if (err)
-			break;
+			goto unlock;
 	}
 	err = devlink_fmsg_arr_pair_nest_end(fmsg);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -25,23 +25,17 @@ static void
 fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
 	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+	struct tls12_crypto_info_aes_gcm_128 *info;
 	char *initial_rn, *gcm_iv;
 	u16 salt_sz, rec_seq_sz;
 	char *salt, *rec_seq;
 	u8 tls_version;
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128: {
-		struct tls12_crypto_info_aes_gcm_128 *info =
-			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
-		EXTRACT_INFO_FIELDS;
-		break;
-	}
-	default:
-		WARN_ON(1);
+	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
 		return;
-	}
+
+	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+	EXTRACT_INFO_FIELDS;
 
 	gcm_iv     = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
 	initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -234,24 +228,18 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
 		      u64 rcd_sn)
 {
 	struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+	struct tls12_crypto_info_aes_gcm_128 *info;
 	__be64 rn_be = cpu_to_be64(rcd_sn);
 	bool skip_static_post;
 	u16 rec_seq_sz;
 	char *rec_seq;
 
-	switch (crypto_info->cipher_type) {
-	case TLS_CIPHER_AES_GCM_128: {
-		struct tls12_crypto_info_aes_gcm_128 *info =
-			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
-		rec_seq = info->rec_seq;
-		rec_seq_sz = sizeof(info->rec_seq);
-		break;
-	}
-	default:
-		WARN_ON(1);
+	if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
 		return;
-	}
+
+	info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+	rec_seq = info->rec_seq;
+	rec_seq_sz = sizeof(info->rec_seq);
 
 	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
 	if (!skip_static_post)
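The two conversions above reduce a switch with one supported case to a guard clause, which also lets the tls12_crypto_info_aes_gcm_128 pointer be hoisted out of the block scope. A generic sketch of the shape, with hypothetical names:

    /* before */
    switch (type) {
    case ONLY_SUPPORTED_TYPE: {
            struct only_supported *info = cast(obj);

            use(info);
            break;
    }
    default:
            WARN_ON(1);
            return;
    }

    /* after */
    if (WARN_ON(type != ONLY_SUPPORTED_TYPE))
            return;

    info = cast(obj);
    use(info);

WARN_ON() evaluates to the truth value of its condition, so it can sit directly in the if, warning and bailing out in one statement.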
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -889,6 +889,9 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	if (err)
 		goto err_destroy_rq;
 
+	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
+		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
+
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
@@ -3390,10 +3393,9 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 	return 0;
 }
 
-static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
+static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 				 struct tc_mqprio_qopt *mqprio)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_channels new_channels = {};
 	u8 tc = mqprio->num_tc;
 	int err = 0;
@@ -3475,7 +3477,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
 						  priv, priv, true);
 #endif
 	case TC_SETUP_QDISC_MQPRIO:
-		return mlx5e_setup_tc_mqprio(dev, type_data);
+		return mlx5e_setup_tc_mqprio(priv, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1175,6 +1175,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
+static LIST_HEAD(mlx5e_rep_block_cb_list);
+
 static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			      void *type_data)
 {
@@ -1182,7 +1184,8 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return flow_block_cb_setup_simple(type_data, NULL,
+		return flow_block_cb_setup_simple(type_data,
+						  &mlx5e_rep_block_cb_list,
 						  mlx5e_rep_setup_tc_cb,
 						  priv, priv, true);
 	default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -923,8 +923,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
 			goto csum_unnecessary;
 
+		stats->csum_complete++;
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+
+		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
+			return; /* CQE csum covers all received bytes */
+
+		/* csum might need some fixups ...*/
 		if (network_depth > ETH_HLEN)
 			/* CQE csum is calculated from the IP header and does
 			 * not cover VLAN headers (if present). This will add
@@ -935,7 +941,6 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 				   skb->csum);
 
 		mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
-		stats->csum_complete++;
 		return;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1946,11 +1946,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->enabled_vports = 0;
 	esw->mode = MLX5_ESWITCH_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
-	else
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1785,8 +1785,8 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 						     struct mlx5_vport *vport)
 {
 	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+	static const struct mlx5_flow_spec spec = {};
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_spec spec = {};
 	int err = 0;
 
 	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
@@ -2131,6 +2131,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 {
 	int err;
 
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+	else
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+
 	err = esw_offloads_steering_init(esw);
 	if (err)
 		return err;
@@ -2187,6 +2193,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 		mlx5_eswitch_disable_passing_vport_metadata(esw);
 	esw_offloads_steering_cleanup(esw);
+	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 
 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
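The spec change in esw_vport_add_ingress_acl_modify_metadata() is the actual stack-usage saving in this hunk: struct mlx5_flow_spec is a large, all-zero object here, so making it static const moves it out of the function's frame into read-only data, shared by every call. A sketch of the trade:

    /* before: a large struct zeroed on the stack at every call */
    struct mlx5_flow_spec spec = {};

    /* after: one zero-filled instance in .rodata, no frame cost */
    static const struct mlx5_flow_spec spec = {};

This is only safe because the function never writes to spec; the flow-steering call it is handed to treats it as input only.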
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -711,7 +711,9 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
 
 	prof->init(mdev, netdev, prof, ipriv);
 
-	mlx5e_attach_netdev(epriv);
+	err = mlx5e_attach_netdev(epriv);
+	if (err)
+		goto detach;
 	netif_carrier_off(netdev);
 
 	/* set rdma_netdev func pointers */
@@ -727,6 +729,11 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
 
 	return 0;
 
+detach:
+	prof->cleanup(epriv);
+	if (ipriv->sub_interface)
+		return err;
+	mlx5e_destroy_mdev_resources(mdev);
 destroy_ht:
 	mlx5i_pkey_qpn_ht_cleanup(netdev);
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -98,27 +98,12 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
 	 */
 	if (entropy_flags.gre_calc_supported &&
 	    reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
-		/* Other applications may change the global FW entropy
-		 * calculations settings. Check that the current entropy value
-		 * is the negative of the updated value.
-		 */
-		if (entropy_flags.force_enabled &&
-		    enable == entropy_flags.gre_calc_enabled) {
-			mlx5_core_warn(tun_entropy->mdev,
-				       "Unexpected GRE entropy calc setting - expected %d",
-				       !entropy_flags.gre_calc_enabled);
-			return -EOPNOTSUPP;
-		}
-		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable,
-							 entropy_flags.force_supported);
+		if (!entropy_flags.force_supported)
+			return 0;
+		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev,
+							 enable, !enable);
 		if (err)
 			return err;
-		/* if we turn on the entropy we don't need to force it anymore */
-		if (entropy_flags.force_supported && enable) {
-			err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0);
-			if (err)
-				return err;
-		}
 	} else if (entropy_flags.calc_supported) {
 		/* Other applications may change the global FW entropy
 		 * calculations settings. Check that the current entropy value
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -368,15 +368,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			break;
 
 		default:
-			/* Other ethtype - we need check the masks for the
-			 * remainder of the key to ensure we can offload.
-			 */
-			if (nfp_flower_check_higher_than_mac(flow)) {
-				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
-				return -EOPNOTSUPP;
-			}
-			break;
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
+			return -EOPNOTSUPP;
 		}
+	} else if (nfp_flower_check_higher_than_mac(flow)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
+		return -EOPNOTSUPP;
 	}
 
 	if (basic.mask && basic.mask->ip_proto) {
@@ -389,18 +386,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			key_layer |= NFP_FLOWER_LAYER_TP;
 			key_size += sizeof(struct nfp_flower_tp_ports);
 			break;
-		default:
-			/* Other ip proto - we need check the masks for the
-			 * remainder of the key to ensure we can offload.
-			 */
-			if (nfp_flower_check_higher_than_l3(flow)) {
-				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
-				return -EOPNOTSUPP;
-			}
-			break;
 		}
 	}
 
+	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
+	    nfp_flower_check_higher_than_l3(flow)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
+		return -EOPNOTSUPP;
+	}
+
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
 		struct flow_match_tcp tcp;
 		u32 tcp_flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -123,7 +123,7 @@
 #define ETDES1_BUFFER2_SIZE_SHIFT	16
 
 /* Extended Receive descriptor definitions */
-#define ERDES4_IP_PAYLOAD_TYPE_MASK	GENMASK(2, 6)
+#define ERDES4_IP_PAYLOAD_TYPE_MASK	GENMASK(6, 2)
 #define ERDES4_IP_HDR_ERR		BIT(3)
 #define ERDES4_IP_PAYLOAD_ERR		BIT(4)
 #define ERDES4_IP_CSUM_BYPASSED		BIT(5)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -192,7 +192,7 @@ static const struct emac_variant emac_variant_h6 = {
 
 /* Used in RX_CTL1*/
 #define EMAC_RX_MD              BIT(1)
-#define EMAC_RX_TH_MASK		GENMASK(4, 5)
+#define EMAC_RX_TH_MASK		GENMASK(5, 4)
 #define EMAC_RX_TH_32		0
 #define EMAC_RX_TH_64		(0x1 << 4)
 #define EMAC_RX_TH_96		(0x2 << 4)
@@ -203,7 +203,7 @@ static const struct emac_variant emac_variant_h6 = {
 /* Used in TX_CTL1*/
 #define EMAC_TX_MD              BIT(1)
 #define EMAC_TX_NEXT_FRM        BIT(2)
-#define EMAC_TX_TH_MASK		GENMASK(8, 10)
+#define EMAC_TX_TH_MASK		GENMASK(10, 8)
 #define EMAC_TX_TH_64		0
 #define EMAC_TX_TH_128		(0x1 << 8)
 #define EMAC_TX_TH_192		(0x2 << 8)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -138,8 +138,8 @@ struct submit_info {
 	struct cpdma_chan *chan;
 	int directed;
 	void *token;
-	void *data;
-	int flags;
+	void *data_virt;
+	dma_addr_t data_dma;
 	int len;
 };
 
@@ -1043,12 +1043,12 @@ static int cpdma_chan_submit_si(struct submit_info *si)
 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 	cpdma_desc_to_port(chan, mode, si->directed);
 
-	if (si->flags & CPDMA_DMA_EXT_MAP) {
-		buffer = (dma_addr_t)si->data;
+	if (si->data_dma) {
+		buffer = si->data_dma;
 		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
 		swlen |= CPDMA_DMA_EXT_MAP;
 	} else {
-		buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
+		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
 		ret = dma_mapping_error(ctlr->dev, buffer);
 		if (ret) {
 			cpdma_desc_free(ctlr->pool, desc, 1);
@@ -1086,10 +1086,10 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
 
 	si.chan = chan;
 	si.token = token;
-	si.data = data;
+	si.data_virt = data;
+	si.data_dma = 0;
 	si.len = len;
 	si.directed = directed;
-	si.flags = 0;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state == CPDMA_STATE_TEARDOWN) {
@@ -1111,10 +1111,10 @@ int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
 
 	si.chan = chan;
 	si.token = token;
-	si.data = (void *)data;
+	si.data_virt = NULL;
+	si.data_dma = data;
 	si.len = len;
 	si.directed = directed;
-	si.flags = CPDMA_DMA_EXT_MAP;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state == CPDMA_STATE_TEARDOWN) {
@@ -1136,10 +1136,10 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 
 	si.chan = chan;
 	si.token = token;
-	si.data = data;
+	si.data_virt = data;
+	si.data_dma = 0;
 	si.len = len;
 	si.directed = directed;
-	si.flags = 0;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -1161,10 +1161,10 @@ int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
 
 	si.chan = chan;
 	si.token = token;
-	si.data = (void *)data;
+	si.data_virt = NULL;
+	si.data_dma = data;
 	si.len = len;
 	si.directed = directed;
-	si.flags = CPDMA_DMA_EXT_MAP;
 
 	spin_lock_irqsave(&chan->lock, flags);
 	if (chan->state != CPDMA_STATE_ACTIVE) {
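The davinci_cpdma change fixes a classic portability bug: dma_addr_t may be wider than a pointer (for example a 32-bit kernel with LPAE and 64-bit DMA addressing), so round-tripping a DMA address through a void *data field truncates it. Keeping two typed fields sidesteps the cast entirely:

    /* before: one field overloaded for both kinds of address */
    void *data;           /* sometimes a virtual pointer,
                             sometimes a (dma_addr_t) forced in */

    /* after: each address kind gets a correctly sized field */
    void *data_virt;      /* virtual address, NULL if pre-mapped   */
    dma_addr_t data_dma;  /* pre-mapped bus address, 0 if unused   */

A nonzero data_dma now also serves as the "caller already mapped this buffer" signal, which is why the CPDMA_DMA_EXT_MAP flags field could be dropped from struct submit_info.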
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
@@ -56,19 +56,19 @@ EXPORT_SYMBOL_GPL(phy_10gbit_features);
 __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
 EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
 
-static const int phy_basic_ports_array[] = {
+const int phy_basic_ports_array[3] = {
 	ETHTOOL_LINK_MODE_Autoneg_BIT,
 	ETHTOOL_LINK_MODE_TP_BIT,
 	ETHTOOL_LINK_MODE_MII_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_basic_ports_array);
 
-static const int phy_fibre_port_array[] = {
+const int phy_fibre_port_array[1] = {
 	ETHTOOL_LINK_MODE_FIBRE_BIT,
 };
+EXPORT_SYMBOL_GPL(phy_fibre_port_array);
 
-static const int phy_all_ports_features_array[] = {
+const int phy_all_ports_features_array[7] = {
 	ETHTOOL_LINK_MODE_Autoneg_BIT,
 	ETHTOOL_LINK_MODE_TP_BIT,
 	ETHTOOL_LINK_MODE_MII_BIT,
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
@@ -64,7 +64,6 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
 MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
-MODULE_SOFTDEP("pre: arc4");
 MODULE_VERSION("1.0.2");
 
 #define SHA1_PAD_SIZE 40
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
@@ -805,7 +805,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         swp[0x1];
 	u8         swp_csum[0x1];
 	u8         swp_lso[0x1];
-	u8         reserved_at_23[0xd];
+	u8         cqe_checksum_full[0x1];
+	u8         reserved_at_24[0xc];
 	u8         max_vxlan_udp_ports[0x8];
 	u8         reserved_at_38[0x6];
 	u8         max_geneve_opt_len[0x1];
diff --git a/include/linux/phy.h b/include/linux/phy.h
@@ -55,6 +55,9 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
 #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
 #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
 
+extern const int phy_basic_ports_array[3];
+extern const int phy_fibre_port_array[1];
+extern const int phy_all_ports_features_array[7];
 extern const int phy_10_100_features_array[4];
 extern const int phy_basic_t1_features_array[2];
 extern const int phy_gbit_features_array[2];
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
@@ -180,9 +180,9 @@ static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
 
 static inline bool fib_rule_requires_fldissect(struct fib_rule *rule)
 {
-	return rule->ip_proto ||
+	return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto ||
 		fib_rule_port_range_set(&rule->sport_range) ||
-		fib_rule_port_range_set(&rule->dport_range);
+		fib_rule_port_range_set(&rule->dport_range));
 }
 
 struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
@@ -60,6 +60,11 @@ static inline bool tcf_block_shared(struct tcf_block *block)
 	return block->index;
 }
 
+static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+{
+	return block && block->index;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
 	WARN_ON(tcf_block_shared(block));
@@ -84,6 +89,11 @@ static inline bool tcf_block_shared(struct tcf_block *block)
 	return false;
 }
 
+static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+{
+	return false;
+}
+
 static inline
 int tcf_block_get(struct tcf_block **p_block,
 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
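The new helper exists because tc_indr_block_ing_cmd() can run after the qdisc is gone, at which point indr_dev->block is NULL, and the plain tcf_block_shared() dereferences its argument unconditionally. Folding the NULL test into a helper keeps the call site a one-liner; a sketch of the failing versus fixed call, assuming a NULL block:

    struct flow_block_offload bo = {
            /* old: oopses when the qdisc has already been destroyed */
            /* .block_shared = tcf_block_shared(indr_dev->block), */

            /* new: a NULL block safely reads as "not shared" */
            .block_shared = tcf_block_non_null_shared(indr_dev->block),
    };

The net/sched/cls_api.c hunk near the end of this diff is the matching call-site change.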
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
@@ -227,13 +227,8 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct hsr_port *master;
 
 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-	if (master) {
-		skb->dev = master->dev;
-		hsr_forward_skb(skb, master);
-	} else {
-		atomic_long_inc(&dev->tx_dropped);
-		dev_kfree_skb_any(skb);
-	}
+	skb->dev = master->dev;
+	hsr_forward_skb(skb, master);
 	return NETDEV_TX_OK;
 }
 
@@ -348,7 +343,11 @@ static void hsr_announce(struct timer_list *t)
 	rcu_read_unlock();
 }
 
-void hsr_dev_destroy(struct net_device *hsr_dev)
+/* This has to be called after all the readers are gone.
+ * Otherwise we would have to check the return value of
+ * hsr_port_get_hsr().
+ */
+static void hsr_dev_destroy(struct net_device *hsr_dev)
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *port;
@@ -364,8 +363,6 @@ void hsr_dev_destroy(struct net_device *hsr_dev)
 	del_timer_sync(&hsr->prune_timer);
 	del_timer_sync(&hsr->announce_timer);
 
-	synchronize_rcu();
-
 	hsr_del_self_node(&hsr->self_node_db);
 	hsr_del_nodes(&hsr->node_db);
 }
@@ -376,6 +373,7 @@ static const struct net_device_ops hsr_device_ops = {
 	.ndo_stop = hsr_dev_close,
 	.ndo_start_xmit = hsr_dev_xmit,
 	.ndo_fix_features = hsr_fix_features,
+	.ndo_uninit = hsr_dev_destroy,
 };
 
 static struct device_type hsr_type = {
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
@@ -14,7 +14,6 @@
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 		     unsigned char multicast_spec, u8 protocol_version);
-void hsr_dev_destroy(struct net_device *hsr_dev);
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
@@ -69,12 +69,6 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
 	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
 }
 
-static void hsr_dellink(struct net_device *hsr_dev, struct list_head *head)
-{
-	hsr_dev_destroy(hsr_dev);
-	unregister_netdevice_queue(hsr_dev, head);
-}
-
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct hsr_priv *hsr;
@@ -119,7 +113,6 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
 	.priv_size	= sizeof(struct hsr_priv),
 	.setup		= hsr_dev_setup,
 	.newlink	= hsr_newlink,
-	.dellink	= hsr_dellink,
 	.fill_info	= hsr_fill_info,
 };
 
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
@@ -464,7 +464,7 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
 	struct ah_data *ahp = x->data;
 	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
 	int hdr_len = skb_network_header_len(skb);
-	int ah_hlen = (ah->hdrlen + 2) << 2;
+	int ah_hlen = ipv6_authlen(ah);
 
 	if (err)
 		goto out;
@@ -546,7 +546,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	ahash = ahp->ahash;
 
 	nexthdr = ah->nexthdr;
-	ah_hlen = (ah->hdrlen + 2) << 2;
+	ah_hlen = ipv6_authlen(ah);
 
 	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
 	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
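This and the following ipv6 hunks swap open-coded Authentication Header length math for the ipv6_authlen() helper. AH encodes its length in 4-octet units minus 2, so the two forms are equivalent; the helper in include/linux/ipv6.h just names the convention:

    /* AH with hdrlen == 4:  (4 + 2) << 2 == 24 bytes total */
    int ah_hlen = ipv6_authlen(ah);        /* was: (ah->hdrlen + 2) << 2 */

AH is the odd one out among IPv6 extension headers: the others use ipv6_optlen(), i.e. (hdrlen + 1) << 3, and mixing the two formulas up is exactly the kind of bug this cleanup prevents.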
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
@@ -74,7 +74,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
 
 	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
 		flowlabel = fl6_sock_lookup(sk, np->flow_label);
-		if (!flowlabel)
+		if (IS_ERR(flowlabel))
 			return -EINVAL;
 	}
 	ip6_datagram_flow_key_init(&fl6, sk);
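The datagram.c fix reflects a contract change in this series: fl6_sock_lookup() now reports failure as an ERR_PTR value rather than plain NULL, so callers must test with IS_ERR(). A sketch of the corrected call pattern:

    struct ip6_flowlabel *flowlabel;

    flowlabel = fl6_sock_lookup(sk, np->flow_label);
    if (IS_ERR(flowlabel))          /* was: if (!flowlabel) */
            return -EINVAL;

With the old NULL check, a non-error return could be misread (or an error pointer could flow onward and be dereferenced later), which is the potential crash the commit title refers to.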
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
@@ -266,7 +266,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 		} else if (nexthdr == NEXTHDR_AUTH) {
 			if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0))
 				break;
-			hdrlen = (hp->hdrlen + 2) << 2;
+			hdrlen = ipv6_authlen(hp);
 		} else
 			hdrlen = ipv6_optlen(hp);
 
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
@@ -435,8 +435,6 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 	}
 	fl->dst = freq->flr_dst;
 	atomic_set(&fl->users, 1);
-	if (fl_shared_exclusive(fl) || fl->opt)
-		static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
 	switch (fl->share) {
 	case IPV6_FL_S_EXCL:
 	case IPV6_FL_S_ANY:
@@ -451,10 +449,15 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 		err = -EINVAL;
 		goto done;
 	}
+	if (fl_shared_exclusive(fl) || fl->opt)
+		static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
 	return fl;
 
 done:
-	fl_free(fl);
+	if (fl) {
+		kfree(fl->opt);
+		kfree(fl);
+	}
 	*err_p = err;
 	return NULL;
 }
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
@@ -416,7 +416,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 			break;
 		optlen = 8;
 	} else if (nexthdr == NEXTHDR_AUTH) {
-		optlen = (hdr->hdrlen + 2) << 2;
+		optlen = ipv6_authlen(hdr);
 	} else {
 		optlen = ipv6_optlen(hdr);
 	}
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
@@ -55,7 +55,7 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 		return false;
 	}
 
-	hdrlen = (ah->hdrlen + 2) << 2;
+	hdrlen = ipv6_authlen(ah);
 
 	pr_debug("IPv6 AH LEN %u %u ", hdrlen, ah->hdrlen);
 	pr_debug("RES %04X ", ah->reserved);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -71,7 +71,7 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
 		if (nexthdr == NEXTHDR_FRAGMENT)
 			hdrlen = 8;
 		else if (nexthdr == NEXTHDR_AUTH)
-			hdrlen = (hp->hdrlen + 2) << 2;
+			hdrlen = ipv6_authlen(hp);
 		else
 			hdrlen = ipv6_optlen(hp);
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -414,7 +414,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
 			BUG();
 		if (nexthdr == NEXTHDR_AUTH)
-			hdrlen = (hdr.hdrlen + 2) << 2;
+			hdrlen = ipv6_authlen(&hdr);
 		else
 			hdrlen = ipv6_optlen(&hdr);
 
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -155,7 +155,7 @@ static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
 
 			}
 
-			hdrlen = (hp->hdrlen + 2) << 2;
+			hdrlen = ipv6_authlen(hp);
 			break;
 		case IPPROTO_ESP:
 			if (logflags & NF_LOG_IPOPT) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
@@ -984,8 +984,13 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 
 	if (sk) {
 		oif = sk->sk_bound_dev_if;
-		if (sk_fullsock(sk))
+		if (sk_fullsock(sk)) {
+			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
+
 			trace_tcp_send_reset(sk, skb);
+			if (np->repflow)
+				label = ip6_flowlabel(ipv6h);
+		}
 		if (sk->sk_state == TCP_TIME_WAIT)
 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
 	} else {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
@@ -1958,10 +1958,9 @@ static struct vport *lookup_vport(struct net *net,
 
 }
 
-/* Called with ovs_mutex */
-static void update_headroom(struct datapath *dp)
+static unsigned int ovs_get_max_headroom(struct datapath *dp)
 {
-	unsigned dev_headroom, max_headroom = 0;
+	unsigned int dev_headroom, max_headroom = 0;
 	struct net_device *dev;
 	struct vport *vport;
 	int i;
@@ -1975,10 +1974,19 @@ static void update_headroom(struct datapath *dp)
 		}
 	}
 
-	dp->max_headroom = max_headroom;
+	return max_headroom;
+}
+
+/* Called with ovs_mutex */
+static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
+{
+	struct vport *vport;
+	int i;
+
+	dp->max_headroom = new_headroom;
 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
-			netdev_set_rx_headroom(vport->dev, max_headroom);
+			netdev_set_rx_headroom(vport->dev, new_headroom);
 }
 
 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
@@ -1989,6 +1997,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct vport *vport;
 	struct datapath *dp;
+	unsigned int new_headroom;
 	u32 port_no;
 	int err;
 
@@ -2050,8 +2059,10 @@ restart:
 				      info->snd_portid, info->snd_seq, 0,
 				      OVS_VPORT_CMD_NEW);
 
-	if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
-		update_headroom(dp);
+	new_headroom = netdev_get_fwd_headroom(vport->dev);
+
+	if (new_headroom > dp->max_headroom)
+		ovs_update_headroom(dp, new_headroom);
 	else
 		netdev_set_rx_headroom(vport->dev, dp->max_headroom);
 
@@ -2122,11 +2133,12 @@ exit_unlock_free:
 
 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 {
-	bool must_update_headroom = false;
+	bool update_headroom = false;
 	struct nlattr **a = info->attrs;
 	struct sk_buff *reply;
 	struct datapath *dp;
 	struct vport *vport;
+	unsigned int new_headroom;
 	int err;
 
 	reply = ovs_vport_cmd_alloc_info();
@@ -2152,12 +2164,17 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	/* the vport deletion may trigger dp headroom update */
 	dp = vport->dp;
 	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
-		must_update_headroom = true;
+		update_headroom = true;
+
 	netdev_reset_rx_headroom(vport->dev);
 	ovs_dp_detach_port(vport);
 
-	if (must_update_headroom)
-		update_headroom(dp);
+	if (update_headroom) {
+		new_headroom = ovs_get_max_headroom(dp);
+
+		if (new_headroom < dp->max_headroom)
+			ovs_update_headroom(dp, new_headroom);
+	}
 	ovs_unlock();
 
 	ovs_notify(&dp_vport_genl_family, reply, info);
diff --git a/net/rds/connection.c b/net/rds/connection.c
@@ -736,6 +736,7 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
 	cinfo->next_rx_seq = cp->cp_next_rx_seq;
 	cinfo->laddr = conn->c_laddr.s6_addr32[3];
 	cinfo->faddr = conn->c_faddr.s6_addr32[3];
+	cinfo->tos = conn->c_tos;
 	strncpy(cinfo->transport, conn->c_trans->t_name,
 		sizeof(cinfo->transport));
 	cinfo->flags = 0;
diff --git a/net/rds/ib.h b/net/rds/ib.h
@@ -15,8 +15,7 @@
 
 #define RDS_IB_DEFAULT_RECV_WR		1024
 #define RDS_IB_DEFAULT_SEND_WR		256
-#define RDS_IB_DEFAULT_FR_WR		256
-#define RDS_IB_DEFAULT_FR_INV_WR	256
+#define RDS_IB_DEFAULT_FR_WR		512
 
 #define RDS_IB_DEFAULT_RETRY_COUNT	1
 
@@ -157,7 +156,6 @@ struct rds_ib_connection {
 
 	/* To control the number of wrs from fastreg */
 	atomic_t		i_fastreg_wrs;
-	atomic_t		i_fastunreg_wrs;
 
 	/* interrupt handling */
 	struct tasklet_struct	i_send_tasklet;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
@@ -460,10 +460,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	 * completion queue and send queue. This extra space is used for FRMR
 	 * registration and invalidation work requests
 	 */
-	fr_queue_space = rds_ibdev->use_fastreg ?
-			 (RDS_IB_DEFAULT_FR_WR + 1) +
-			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
-			 : 0;
+	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
 
 	/* add the conn now so that connection establishment has the dev */
 	rds_ib_add_conn(rds_ibdev, conn);
@@ -530,7 +527,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	attr.send_cq = ic->i_send_cq;
 	attr.recv_cq = ic->i_recv_cq;
 	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
-	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
 
 	/*
 	 * XXX this can fail if max_*_wr is too large? Are we supposed
@@ -1009,8 +1005,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		wait_event(rds_ib_ring_empty_wait,
 			   rds_ib_ring_empty(&ic->i_recv_ring) &&
 			   (atomic_read(&ic->i_signaled_sends) == 0) &&
-			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
-			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
+			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
 		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
 
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
@@ -239,8 +239,8 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (frmr->fr_state != FRMR_IS_INUSE)
 		goto out;
 
-	while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
-		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
+		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		cpu_relax();
 	}
 
@@ -257,7 +257,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (unlikely(ret)) {
 		frmr->fr_state = FRMR_IS_STALE;
 		frmr->fr_inv = false;
-		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
 		goto out;
 	}
@@ -285,10 +285,9 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 	if (frmr->fr_inv) {
 		frmr->fr_state = FRMR_IS_FREE;
 		frmr->fr_inv = false;
-		atomic_inc(&ic->i_fastreg_wrs);
-	} else {
-		atomic_inc(&ic->i_fastunreg_wrs);
 	}
+
+	atomic_inc(&ic->i_fastreg_wrs);
 }
 
 void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
@@ -69,6 +69,16 @@ static void rds_ib_send_complete(struct rds_message *rm,
 	complete(rm, notify_status);
 }
 
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+				   struct rm_data_op *op,
+				   int wc_status)
+{
+	if (op->op_nents)
+		ib_dma_unmap_sg(ic->i_cm_id->device,
+				op->op_sg, op->op_nents,
+				DMA_TO_DEVICE);
+}
+
 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
 				   struct rm_rdma_op *op,
 				   int wc_status)
@@ -129,21 +139,6 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
 	rds_ib_stats_inc(s_ib_atomic_fadd);
 }
 
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
-				   struct rm_data_op *op,
-				   int wc_status)
-{
-	struct rds_message *rm = container_of(op, struct rds_message, data);
-
-	if (op->op_nents)
-		ib_dma_unmap_sg(ic->i_cm_id->device,
-				op->op_sg, op->op_nents,
-				DMA_TO_DEVICE);
-
-	if (rm->rdma.op_active && rm->data.op_notify)
-		rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
-}
-
 /*
  * Unmap the resources associated with a struct send_work.
  *
@@ -902,7 +897,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 		send->s_queued = jiffies;
 		send->s_op = NULL;
 
-		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+		if (!op->op_notify)
+			nr_sig += rds_ib_set_wr_signal_state(ic, send,
+							     op->op_notify);
 
 		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_rdma_wr.remote_addr = remote_addr;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
@@ -641,16 +641,6 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		}
 		op->op_notifier->n_user_token = args->user_token;
 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
-
-		/* Enable rmda notification on data operation for composite
-		 * rds messages and make sure notification is enabled only
-		 * for the data operation which follows it so that application
-		 * gets notified only after full message gets delivered.
-		 */
-		if (rm->data.op_sg) {
-			rm->rdma.op_notify = 0;
-			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-		}
 	}
 
 	/* The cookie contains the R_Key of the remote memory region, and
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
@@ -112,17 +112,20 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 		if (!conn)
 			break;
 		err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
-		if (!err || (err && ((*err) == RDS_RDMA_REJ_INCOMPAT))) {
+		if (!err ||
+		    (err && len >= sizeof(*err) &&
+		     ((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
 			pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
 				&conn->c_laddr, &conn->c_faddr);
-			conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
-			conn->c_tos = 0;
+
+			if (!conn->c_tos)
+				conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
 			rds_conn_drop(conn);
 		}
 		rdsdebug("Connection rejected: %s\n",
 			 rdma_reject_msg(cm_id, event->status));
 		break;
 		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/net/rds/rds.h b/net/rds/rds.h
@@ -476,7 +476,6 @@ struct rds_message {
 		} rdma;
 		struct rm_data_op {
 			unsigned int		op_active:1;
-			unsigned int		op_notify:1;
 			unsigned int		op_nents;
 			unsigned int		op_count;
 			unsigned int		op_dmasg;
diff --git a/net/rds/send.c b/net/rds/send.c
@@ -491,14 +491,12 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 	struct rm_rdma_op *ro;
 	struct rds_notifier *notifier;
 	unsigned long flags;
-	unsigned int notify = 0;
 
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	notify = rm->rdma.op_notify | rm->data.op_notify;
 	ro = &rm->rdma;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro->op_active && notify && ro->op_notifier) {
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
 		notifier = ro->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
@@ -684,7 +684,7 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
 		.command	= command,
 		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
 		.net		= dev_net(indr_dev->dev),
-		.block_shared	= tcf_block_shared(indr_dev->block),
+		.block_shared	= tcf_block_non_null_shared(indr_dev->block),
 	};
 	INIT_LIST_HEAD(&bo.cb_list);
 
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
@@ -190,7 +190,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
 	struct name_table *nt = tipc_name_table(net);
 	struct sk_buff_head head;
 
-	__skb_queue_head_init(&head);
+	skb_queue_head_init(&head);
 
 	read_lock_bh(&nt->cluster_scope_lock);
 	named_distribute(net, &head, dnode, &nt->cluster_scope);
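The tipc fix is one word with a real locking consequence: __skb_queue_head_init() initializes only the list pointers and is meant for queues whose built-in lock is never taken (protection comes from elsewhere), while skb_queue_head_init() additionally runs spin_lock_init() on the queue's lock. Since skbs on this list later reach code paths that take head->lock, the lock must be initialized:

    struct sk_buff_head head;

    skb_queue_head_init(&head);     /* list pointers + spin_lock_init()    */
    __skb_queue_head_init(&head);   /* list pointers only, lock unusable   */

With the __ variant, the first skb_queue_* call that grabs the uninitialized lock is undefined behavior (and splats under lockdep), which is the bug the commit title describes.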
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -69,6 +69,123 @@
         "matchCount": "0",
         "teardown": []
     },
+    {
+        "id": "d4cd",
+        "name": "Add skbedit action with valid mark and mask",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action skbedit mark 1/0xaabb",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabb",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
+    },
+    {
+        "id": "baa7",
+        "name": "Add skbedit action with valid mark and 32-bit maximum mask",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action skbedit mark 1/0xffffffff",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit mark 1/0xffffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
+    },
+    {
+        "id": "62a5",
+        "name": "Add skbedit action with valid mark and mask exceeding 32-bit maximum",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action skbedit mark 1/0xaabbccddeeff112233",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabbccddeeff112233",
+        "matchCount": "0",
+        "teardown": []
+    },
+    {
+        "id": "bc15",
+        "name": "Add skbedit action with valid mark and mask with invalid format",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action skbedit mark 1/-1234",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit mark 1/-1234",
+        "matchCount": "0",
+        "teardown": []
+    },
+    {
+        "id": "57c2",
+        "name": "Replace skbedit action with new mask",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbedit mark 1/0x11223344 index 1"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbedit mark 1/0xaabb index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit mark 1/0xaabb",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
+    },
     {
         "id": "081d",
         "name": "Add skbedit action with priority",