// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_fw_update.h"

static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	/* "%8phD" prints the eight DSN bytes as hyphen-separated hex */
	snprintf(buf, len, "%8phD", dsn);
}

static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	status = ice_read_pba_string(hw, (u8 *)buf, len);
	if (status)
		return -EIO;

	return 0;
}

static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
		 hw->fw_patch);

	return 0;
}

static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);

	return 0;
}

static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(buf, len, "0x%08x", hw->fw_build);

	return 0;
}

static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_orom_info *orom = &pf->hw.nvm.orom;

	snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);

	return 0;
}

static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_nvm_info *nvm = &pf->hw.nvm;

	snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);

	return 0;
}

static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_nvm_info *nvm = &pf->hw.nvm;

	snprintf(buf, len, "0x%08x", nvm->eetrack);

	return 0;
}

static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(buf, len, "%s", hw->active_pkg_name);

	return 0;
}

static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
		 pkg->draft);

	return 0;
}

static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
{
	snprintf(buf, len, "0x%08x", pf->hw.active_track_id);

	return 0;
}

static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;

	/* The netlist version fields are BCD formatted */
	snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
		 netlist->cust_ver);

	return 0;
}

static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
{
	struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;

	snprintf(buf, len, "0x%08x", netlist->hash);

	return 0;
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	int (*getter)(struct ice_pf *pf, char *buf, size_t len);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
	running("fw.psid.api", ice_info_nvm_ver),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	running("fw.netlist", ice_info_netlist_ver),
	running("fw.netlist.build", ice_info_netlist_build),
};
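
/*
 * Illustrative (not captured) output: with these getters registered,
 * "devlink dev info" on an ice PF is expected to report keys such as
 * board.id, fw.mgmt, fw.mgmt.api, fw.undi, fw.psid.api, fw.bundle_id,
 * fw.app and fw.netlist; the exact values depend on the installed NVM
 * image and DDP package.
 */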

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	char buf[100];
	size_t i;
	int err;

	err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
		return err;
	}

	ice_info_get_dsn(pf, buf, sizeof(buf));

	err = devlink_info_serial_number_put(req, buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		return err;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
			return err;
		}

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				return err;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put(req, key, buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				return err;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put(req, key, buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Device flash update via devlink
 *
 * The pldmfw library is used to implement device flash update for the Intel
 * ice networking device driver, through the devlink flash update interface.
 *
 * The main parts of the flash include the Option ROM, the netlist module, and
 * the main NVM data. The PLDM firmware file contains modules for each of
 * these components; the file is scanned for "fw.undi" (the Option ROM),
 * "fw.mgmt" (the main NVM module containing the primary device firmware), and
 * "fw.netlist" (the netlist module).
 *
 * The flash is separated into two banks: the active bank containing the
 * running firmware, and the inactive bank used for update. Each module is
 * updated in a staged process. First, the inactive bank is erased, preparing
 * the device for update. Second, the contents of the component are copied to
 * the inactive portion of the flash. After all components are updated, the
 * driver signals the device to switch the active bank during the next EMP
 * reset, which usually occurs during the next reboot.
 *
 * Although the firmware AdminQ interface reports an immediate status for each
 * command, the NVM erase and NVM write commands complete asynchronously, and
 * the driver must not continue writing until previous erase and write
 * commands have finished. The real status of the NVM commands is returned
 * over the receive AdminQ, so a simple wait-queue based interface lets the
 * main update thread sleep until the completion status is reported by
 * firmware. For erasing the inactive banks, this can take quite a while in
 * practice.
 *
 * To help visualize the process to devlink and other applications based on
 * the devlink netlink interface, status is reported via
 * devlink_flash_update_status_notify(). Status is reported after each 4k
 * block while writing, but there is no meaningful status to report while
 * erasing; the driver simply waits for the complete module erasure to finish.
 */
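
/*
 * Minimal sketch of the wait-queue completion pattern described above. The
 * names here are illustrative only; this is not the driver's actual
 * completion interface, which lives with the AdminQ handling code. The
 * caller is assumed to have run init_waitqueue_head() on the queue, and the
 * AdminQ receive path is assumed to fill in status and set done before
 * waking the queue.
 */
struct ice_example_nvm_wait {
	wait_queue_head_t waitq;	/* the update thread sleeps here */
	bool done;			/* set from the AdminQ receive path */
	int status;			/* completion status from firmware */
};

static inline int ice_example_wait_nvm_done(struct ice_example_nvm_wait *w,
					    unsigned long timeout)
{
	/* Sleep until the receive-path handler sets w->done; a return of 0
	 * from wait_event_timeout() means the timeout elapsed first.
	 */
	if (!wait_event_timeout(w->waitq, w->done, timeout))
		return -ETIMEDOUT;

	return w->status;
}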

/**
 * ice_devlink_flash_update - Update firmware stored in flash on the device
 * @devlink: pointer to devlink associated with device to update
 * @params: flash update parameters
 * @extack: netlink extended ACK structure
 *
 * Perform a device flash update. The bulk of the update logic is contained
 * within the ice_flash_pldm_image function.
 *
 * Return: zero on success, or an error code on failure.
 */
static int
ice_devlink_flash_update(struct devlink *devlink,
			 struct devlink_flash_update_params *params,
			 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;
	const struct firmware *fw;
	u8 preservation;
	int err;

	if (!params->overwrite_mask) {
		/* preserve all settings and identifiers */
		preservation = ICE_AQC_NVM_PRESERVE_ALL;
	} else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
		/* overwrite settings, but preserve the vital device identifiers */
		preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
	} else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
					      DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
		/* overwrite both settings and identifiers, preserve nothing */
		preservation = ICE_AQC_NVM_NO_PRESERVATION;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
		return -EOPNOTSUPP;
	}

	if (!hw->dev_caps.common_cap.nvm_unified_update) {
		NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
		return -EOPNOTSUPP;
	}

	err = ice_check_for_pending_update(pf, NULL, extack);
	if (err)
		return err;

	err = request_firmware(&fw, params->file_name, dev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
		return err;
	}

	dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);

	devlink_flash_update_begin_notify(devlink);
	devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
	err = ice_flash_pldm_image(pf, fw, preservation, extack);
	devlink_flash_update_end_notify(devlink);

	release_firmware(fw);

	return err;
}
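
/*
 * Example invocations (hypothetical device address and firmware file name;
 * the file is resolved by request_firmware(), typically under /lib/firmware):
 *
 *	devlink dev flash pci/0000:af:00.0 file ice-fw.bin
 *	devlink dev flash pci/0000:af:00.0 file ice-fw.bin overwrite settings
 *
 * An empty overwrite mask maps to ICE_AQC_NVM_PRESERVE_ALL above, while
 * "overwrite settings" maps to ICE_AQC_NVM_PRESERVE_SELECTED.
 */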

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,
};

static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
	if (!devlink)
		return NULL;

	/* Add an action to teardown the devlink when unwinding the driver */
	if (devm_add_action(dev, ice_devlink_free, devlink)) {
		devlink_free(devlink);
		return NULL;
	}

	return devlink_priv(devlink);
}
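
/*
 * Probe-time usage sketch (names illustrative): because ice_allocate_pf()
 * registers a devres action, the devlink instance is freed automatically
 * when the device is unbound, so no explicit devlink_free() is needed on
 * teardown:
 *
 *	pf = ice_allocate_pf(&pdev->dev);
 *	if (!pf)
 *		return -ENOMEM;
 */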

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = devlink_register(devlink, dev);
	if (err) {
		dev_err(dev, "devlink registration failed: %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devlink_unregister(priv_to_devlink(pf));
}

/*
 * The devlink_port is stored in the VSI rather than in the ice_pf.
 *
 * Keeping it in the ice_pf made sense because a single devlink_port is
 * created for each PF, but it did not mesh well with the driver's
 * abstractions and allowed devlink_port_unregister() to be called twice
 * during error cleanup, which leads to a kernel panic. If register_netdev()
 * fails, ice_cfg_netdev() calls devlink_port_unregister() as it cleans up;
 * the port assumed to belong to the PF structure was then unregistered a
 * second time, because the port was allocated in a per-VSI function for the
 * main netdev but never released when that VSI was cleaned up: allocation
 * and destruction were not aligned.
 *
 * Managing the port as part of the PF VSI matches its release with the
 * unregister_netdev() of the main PF VSI, and paves the way for devlink
 * ports allocated for other purposes such as SR-IOV VFs.
 *
 * The indexing follows suit: the port number is the logical port number
 * returned by firmware, and the index is the VSI index (sometimes referred
 * to as the VSI handle) rather than the PF id, which stemmed from an old,
 * unworkable goal of sharing one devlink across physical function drivers.
 */

/**
 * ice_devlink_create_port - Create a devlink port for this VSI
 * @vsi: the VSI to create a port for
 *
 * Create and register a devlink_port for this VSI.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_port(struct ice_vsi *vsi)
{
	struct devlink_port_attrs attrs = {};
	struct ice_port_info *pi;
	struct devlink *devlink;
	struct device *dev;
	struct ice_pf *pf;
	int err;

	/* Currently we only create devlink_port instances for PF VSIs */
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	pf = vsi->back;
	devlink = priv_to_devlink(pf);
	dev = ice_pf_to_dev(pf);
	pi = pf->hw.port_info;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = pi->lport;
	devlink_port_attrs_set(&vsi->devlink_port, &attrs);
	err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "devlink_port_register failed: %d\n", err);
		return err;
	}

	vsi->devlink_port_registered = true;

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
ice: refactor devlink_port to be per-VSI
Currently, the devlink_port structure is stored within the ice_pf. This
made sense because we create a single devlink_port for each PF. This
setup does not mesh with the abstractions in the driver very well, and
led to a flow where we accidentally call devlink_port_unregister twice
during error cleanup.
In particular, if devlink_port_register or devlink_port_unregister are
called twice, this leads to a kernel panic. This appears to occur during
some possible flows while cleaning up from a failure during driver
probe.
If register_netdev fails, then we will call devlink_port_unregister in
ice_cfg_netdev as it cleans up. Later, we again call
devlink_port_unregister since we assume that we must cleanup the port
that is associated with the PF structure.
This occurs because we cleanup the devlink_port for the main PF even
though it was not allocated. We allocated the port within a per-VSI
function for managing the main netdev, but did not release the port when
cleaning up that VSI, the allocation and destruction are not aligned.
Instead of attempting to manage the devlink_port as part of the PF
structure, manage it as part of the PF VSI. Doing this has advantages,
as we can match the de-allocation of the devlink_port with the
unregister_netdev associated with the main PF VSI.
Moving the port to the VSI is preferable as it paves the way for
handling devlink ports allocated for other purposes such as SR-IOV VFs.
Since we're changing up how we allocate the devlink_port, also change
the indexing. Originally, we indexed the port using the PF id number.
This came from an old goal of sharing a devlink for each physical
function. Managing devlink instances across multiple function drivers is
not workable. Instead, lets set the port number to the logical port
number returned by firmware and set the index using the VSI index
(sometimes referred to as VSI handle).
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-10-08 01:54:44 +08:00
|
|
|
* ice_devlink_destroy_port - Destroy the devlink_port for this VSI
|
|
|
|
* @vsi: the VSI to cleanup
|
2020-03-12 09:58:15 +08:00
|
|
|
*
 * Unregisters the devlink_port structure associated with this VSI.
 */
void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
	if (!vsi->devlink_port_registered)
		return;

	devlink_port_type_clear(&vsi->devlink_port);
	devlink_port_unregister(&vsi->devlink_port);

	vsi->devlink_port_registered = false;
}
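
/* ice_devlink_destroy_port() is the teardown counterpart of the create path
 * above; per the commit note, it is meant to run alongside the
 * unregister_netdev() of the main PF VSI so that allocation and destruction
 * of the devlink_port stay aligned.
 */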

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the shadow-ram devlink region. It captures a snapshot of the Shadow RAM
 * contents. This snapshot can later be viewed via the devlink-region
 * interface.
 *
 * @returns zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	void *nvm_data;
	u32 nvm_size;

	nvm_size = hw->nvm.flash_size;
	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;
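
	/* NVM reads must be performed while holding the NVM semaphore; take
	 * it for the duration of the flat read below and release it after.
	 */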
	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		vfree(nvm_data);
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			nvm_size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		vfree(nvm_data);
		return -EIO;
	}

	ice_release_nvm(hw);
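
	/* On success, devlink takes ownership of the snapshot buffer and
	 * later frees it through the region's .destructor (vfree here).
	 */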
	*data = nvm_data;

	return 0;
}
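
/* Example of taking and then reading a snapshot from userspace with the
 * iproute2 devlink tool (illustrative; the PCI address and snapshot ID are
 * placeholders):
 *
 *	# devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
 *	# devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
 */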

/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * @returns zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	void *devcaps;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;
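
	/* Query the full device capability list from firmware over the
	 * AdminQ; the raw response buffer is what gets exposed as the
	 * snapshot.
	 */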
	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return -EIO;
	}

	*data = (u8 *)devcaps;

	return 0;
}
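
/* As with nvm-flash, a snapshot of this region can be requested from
 * userspace (illustrative; PCI address and snapshot ID are placeholders):
 *
 *	# devlink region new pci/0000:01:00.0/device-caps snapshot 2
 */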

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};
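
/* Both snapshot callbacks allocate their buffers with vzalloc(), so vfree
 * is the matching destructor devlink calls when a snapshot is deleted.
 */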

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory and the device capabilities on the device.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size;

	nvm_size = pf->hw.nvm.flash_size;
	pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
					       nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	pf->devcaps_region = devlink_region_create(devlink,
						   &ice_devcaps_region_ops, 10,
						   ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}
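
/* Once created, the regions are visible from userspace (illustrative
 * output; sizes are device-specific, and the max snapshot counts come from
 * the devlink_region_create() arguments above):
 *
 *	$ devlink region show
 *	pci/0000:01:00.0/nvm-flash: size <flash_size> snapshot [] max 1
 *	pci/0000:01:00.0/device-caps: size <ICE_AQ_MAX_BUF_LEN> snapshot [] max 10
 */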

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devlink_region_destroy(pf->nvm_region);

	if (pf->devcaps_region)
		devlink_region_destroy(pf->devcaps_region);
}