mlx5-fixes-2017-04-22

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJY+9NgAAoJEEg/ir3gV/o+/RIH/2Ua/FvxWtnaXhLj9GPELdGx
4Q2+ub43Q/F2cU2rIP0S/Ki3fEeOfk+IR87bvKBc+KTcLwUcBQloLjiLTxVOXSNY
+NmE7T1gl7Sb4NzJ9lDVYbmUlDzWZixbFkQdZ6nZJTKecXuN+xooL7EWosyZKuFd
FlDpIMacWlH2bMb/1U4lClg9MMPz8e37B9kJ0Vy/lert7NkVdXgYbPI2pKxweF9i
7yH0pNLKYvIQOubZZ9A7gPhk+OGp6xLAo9pJF6xG8tQuXI59Fz6tcKGbNb8GdzZu
g12EY2c75BxWJofPtvsDDM5i8ypwF3tfCqxDjw4h9F0wHGJv6tlh51vyuYA8ceg=
=KnUF
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2017-04-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-04-22

This series contains some mlx5 fixes for net.

For your convenience, the series doesn't introduce any conflict with
the ongoing net-next pull request.

Please pull and let me know if there's any problem.

For -stable:
("net/mlx5: E-Switch, Correctly deal with inline mode on ConnectX-5") kernels >= 4.10
("net/mlx5e: Fix ETHTOOL_GRXCLSRLALL handling") kernels >= 4.8
("net/mlx5e: Fix small packet threshold") kernels >= 4.7
("net/mlx5: Fix driver load bad flow when having fw initializing timeout") kernels >= 4.4
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 38baf3a68b
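For context before the diff: the encap-header hunks below all apply one pattern — validate the required header size against the device's max_encap_header_size capability up front, then allocate exactly the required size rather than the device maximum. The following is a minimal, self-contained userspace C sketch of that pattern; build_encap_header and its parameters are simplified stand-ins for illustration, not mlx5 driver symbols.

/* Illustrative sketch only -- the check-then-exact-alloc pattern used by
 * the encap fixes below; simplified stand-in, not the driver code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int build_encap_header(int max_encap_size, int encap_size,
			      char **out_header)
{
	char *header;

	/* reject up front if the device cannot take a header this large */
	if (max_encap_size < encap_size) {
		fprintf(stderr, "encap size %d too big, max supported is %d\n",
			encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	/* allocate exactly what the header needs, not the device maximum */
	header = calloc(1, encap_size);
	if (!header)
		return -ENOMEM;

	*out_header = header;
	return 0;
}

int main(void)
{
	char *header;

	/* e.g. a 50-byte encap header against a 128-byte device limit */
	if (build_encap_header(128, 50, &header) == 0) {
		puts("header allocated");
		free(header);
	}
	return 0;
}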
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)

 #define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)

 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ		(64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT		32
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info
 	int idx = 0;
 	int err = 0;

+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,

 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
+		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+		    esw->offloads.inline_mode < min_inline) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    min_inline, esw->offloads.inline_mode);
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	return 0;
 }

-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 __be32 daddr,
-				 __be32 saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  __be32 daddr,
+				  __be32 saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }

-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 struct in6_addr *daddr,
-				 struct in6_addr *saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+				  char buf[], int encap_size,
+				  unsigned char h_dest[ETH_ALEN],
+				  int ttl,
+				  struct in6_addr *daddr,
+				  struct in6_addr *saddr,
+				  __be16 udp_dst_port,
+				  __be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }

 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 					  struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, ttl, err;
 	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
 	char *encap_header;
+	int ttl, err;

-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	if (max_encap_size < ipv4_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv4_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
+
+	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;

@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,

 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   fl4.daddr,
-						   fl4.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv4(*out_dev, encap_header,
+				      ipv4_encap_size, e->h_dest, ttl,
+				      fl4.daddr,
+				      fl4.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	}

 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv4_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,

 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, err, ttl = 0;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;
+	int err, ttl = 0;

-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	if (max_encap_size < ipv6_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv6_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}
+
+	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;

@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,

 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   &fl6.daddr,
-						   &fl6.saddr, tun_key->tp_dst,
-						   tunnel_id_to_key32(tun_key->tun_id));
+		gen_vxlan_header_ipv6(*out_dev, encap_header,
+				      ipv6_encap_size, e->h_dest, ttl,
+				      &fl6.daddr,
+				      &fl6.saddr, tun_key->tp_dst,
+				      tunnel_id_to_key32(tun_key->tun_id));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	}

 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv6_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
+	int err, vport;
 	u8 mlx5_mode;

 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+			return 0;
+		/* fall through */
+	case MLX5_CAP_INLINE_MODE_L2:
+		esw_warn(dev, "Inline mode can't be set\n");
 		return -EOPNOTSUPP;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		break;
+	}

 	if (esw->offloads.num_flows > 0) {
 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }

 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
 	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		mlx5_mode = MLX5_INLINE_MODE_NONE;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_L2:
+		mlx5_mode = MLX5_INLINE_MODE_L2;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		goto query_vports;
+	}

+query_vports:
 	for (vport = 1; vport <= nvfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 		prev_mlx5_mode = mlx5_mode;
 	}

+out:
 	*mode = mlx5_mode;
 	return 0;
 }
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}

 	err = mlx5_core_enable_hca(dev, 0);
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
 	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

 	list_del(&up->list);
+	iounmap(up->map);
 	if (mlx5_cmd_free_uar(up->mdev, up->index))
 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
 	kfree(up->reg_bitmap);