net/mlx4_core: Refactor mlx4_load_one

Refactor mlx4_load_one as a preparation step for a new and
more complicated load function. The goal is to support both
newer firmware that requires init_hca to be done before
enable_sriov, and legacy firmware that requires things to
be done the other way around.

Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Matan Barak 2014-11-13 14:45:30 +02:00 committed by David S. Miller
parent ffc39f6d6f
commit a0eacca948

View File

@ -1473,6 +1473,12 @@ static void mlx4_close_hca(struct mlx4_dev *dev)
else { else {
mlx4_CLOSE_HCA(dev, 0); mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev); mlx4_free_icms(dev);
}
}
static void mlx4_close_fw(struct mlx4_dev *dev)
{
if (!mlx4_is_slave(dev)) {
mlx4_UNMAP_FA(dev); mlx4_UNMAP_FA(dev);
mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
} }
@ -1619,17 +1625,10 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
} }
static int mlx4_init_hca(struct mlx4_dev *dev) static int mlx4_init_fw(struct mlx4_dev *dev)
{ {
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_adapter adapter;
struct mlx4_dev_cap dev_cap;
struct mlx4_mod_stat_cfg mlx4_cfg; struct mlx4_mod_stat_cfg mlx4_cfg;
struct mlx4_profile profile; int err = 0;
struct mlx4_init_hca_param init_hca;
u64 icm_size;
int err;
struct mlx4_config_dev_params params;
if (!mlx4_is_slave(dev)) { if (!mlx4_is_slave(dev)) {
err = mlx4_QUERY_FW(dev); err = mlx4_QUERY_FW(dev);
@ -1652,7 +1651,23 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
if (err) if (err)
mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
}
return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_adapter adapter;
struct mlx4_dev_cap dev_cap;
struct mlx4_profile profile;
struct mlx4_init_hca_param init_hca;
u64 icm_size;
struct mlx4_config_dev_params params;
int err;
if (!mlx4_is_slave(dev)) {
err = mlx4_dev_cap(dev, &dev_cap); err = mlx4_dev_cap(dev, &dev_cap);
if (err) { if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
@ -2275,6 +2290,53 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
iounmap(owner); iounmap(owner);
} }
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))

/* Try to bring up SR-IOV with @total_vfs virtual functions.
 *
 * Returns the (possibly updated) device flags: on success the SRIOV and
 * MASTER flags are set and the SLAVE flag is cleared; on failure the
 * MASTER flag is cleared so the caller can detect the inconsistent
 * state via SRIOV_VALID_STATE().
 *
 * @existing_vfs: number of VFs already enabled on the PCI function
 * (e.g. left over from a previous probe). When non-zero,
 * pci_enable_sriov() is skipped and the existing VFs are reused.
 */
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;

	/* kcalloc checks total_vfs * sizeof(...) for multiplication
	 * overflow, unlike an open-coded product passed to kzalloc.
	 */
	dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
			       GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		int err = 0;

		atomic_inc(&pf_loading);
		if (existing_vfs) {
			/* VFs are already on; just warn if the count
			 * differs from what was requested.
			 */
			if (existing_vfs != total_vfs)
				mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
					 existing_vfs, total_vfs);
		} else {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
			err = pci_enable_sriov(pdev, total_vfs);
		}
		if (err) {
			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
				 err);
			/* undo the pf_loading reference taken above */
			atomic_dec(&pf_loading);
			goto disable_sriov;
		} else {
			mlx4_warn(dev, "Running in master mode\n");
			dev_flags |= MLX4_FLAG_SRIOV |
				     MLX4_FLAG_MASTER;
			dev_flags &= ~MLX4_FLAG_SLAVE;
			dev->num_vfs = total_vfs;
		}
	}
	return dev_flags;

disable_sriov:
	dev->num_vfs = 0;
	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;	/* don't leave a dangling pointer in dev */
	return dev_flags & ~MLX4_FLAG_MASTER;
}
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv) int total_vfs, int *nvfs, struct mlx4_priv *priv)
{ {
@ -2320,37 +2382,12 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
} }
if (total_vfs) { if (total_vfs) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", existing_vfs = pci_num_vf(pdev);
total_vfs); dev->flags = MLX4_FLAG_MASTER;
dev->dev_vfs = kzalloc( dev->flags = mlx4_enable_sriov(dev, pdev, total_vfs,
total_vfs * sizeof(*dev->dev_vfs), existing_vfs);
GFP_KERNEL); if (!SRIOV_VALID_STATE(dev->flags))
if (NULL == dev->dev_vfs) { goto err_sriov;
mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = -ENOMEM;
goto err_free_own;
} else {
atomic_inc(&pf_loading);
existing_vfs = pci_num_vf(pdev);
if (existing_vfs) {
err = 0;
if (existing_vfs != total_vfs)
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
existing_vfs, total_vfs);
} else {
err = pci_enable_sriov(pdev, total_vfs);
}
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
} else {
mlx4_warn(dev, "Running in master mode\n");
dev->flags |= MLX4_FLAG_SRIOV |
MLX4_FLAG_MASTER;
dev->num_vfs = total_vfs;
}
}
} }
atomic_set(&priv->opreq_count, 0); atomic_set(&priv->opreq_count, 0);
@ -2391,17 +2428,33 @@ slave_start:
} }
} }
err = mlx4_init_fw(dev);
if (err) {
mlx4_err(dev, "Failed to init fw, aborting.\n");
goto err_mfunc;
}
err = mlx4_init_hca(dev); err = mlx4_init_hca(dev);
if (err) { if (err) {
if (err == -EACCES) { if (err == -EACCES) {
/* Not primary Physical function /* Not primary Physical function
* Running in slave mode */ * Running in slave mode */
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
/* We're not a PF */
if (dev->flags & MLX4_FLAG_SRIOV) {
if (!existing_vfs)
pci_disable_sriov(pdev);
if (mlx4_is_master(dev))
atomic_dec(&pf_loading);
dev->flags &= ~MLX4_FLAG_SRIOV;
}
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
dev->flags |= MLX4_FLAG_SLAVE; dev->flags |= MLX4_FLAG_SLAVE;
dev->flags &= ~MLX4_FLAG_MASTER; dev->flags &= ~MLX4_FLAG_MASTER;
goto slave_start; goto slave_start;
} else } else
goto err_mfunc; goto err_fw;
} }
/* check if the device is functioning at its maximum possible speed. /* check if the device is functioning at its maximum possible speed.
@ -2556,6 +2609,9 @@ err_master_mfunc:
err_close: err_close:
mlx4_close_hca(dev); mlx4_close_hca(dev);
err_fw:
mlx4_close_fw(dev);
err_mfunc: err_mfunc:
if (mlx4_is_slave(dev)) if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev); mlx4_multi_func_cleanup(dev);
@ -2572,7 +2628,6 @@ err_sriov:
kfree(priv->dev.dev_vfs); kfree(priv->dev.dev_vfs);
err_free_own:
if (!mlx4_is_slave(dev)) if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev); mlx4_free_ownership(dev);
@ -2803,6 +2858,7 @@ static void mlx4_unload_one(struct pci_dev *pdev)
if (mlx4_is_master(dev)) if (mlx4_is_master(dev))
mlx4_multi_func_cleanup(dev); mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev); mlx4_close_hca(dev);
mlx4_close_fw(dev);
if (mlx4_is_slave(dev)) if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev); mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
@ -2812,6 +2868,7 @@ static void mlx4_unload_one(struct pci_dev *pdev)
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n"); mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev); pci_disable_sriov(pdev);
dev->flags &= ~MLX4_FLAG_SRIOV;
dev->num_vfs = 0; dev->num_vfs = 0;
} }