linux/drivers/net/ethernet/intel/ice/ice_devlink.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
#include "ice.h"
ice: add basic handler for devlink .info_get The devlink .info_get callback allows the driver to report detailed version information. The following devlink versions are reported with this initial implementation: "fw.mgmt" -> The version of the firmware that controls PHY, link, etc "fw.mgmt.api" -> API version of interface exposed over the AdminQ "fw.mgmt.build" -> Unique build id of the source for the management fw "fw.undi" -> Version of the Option ROM containing the UEFI driver "fw.psid.api" -> Version of the NVM image format. "fw.bundle_id" -> Unique identifier for the combined flash image. "fw.app.name" -> The name of the active DDP package. "fw.app" -> The version of the active DDP package. With this, devlink dev info can report at least as much information as is reported by ETHTOOL_GDRVINFO. Compare the output from ethtool vs from devlink: $ ethtool -i ens785s0 driver: ice version: 0.8.1-k firmware-version: 0.80 0x80002ec0 1.2581.0 expansion-rom-version: bus-info: 0000:3b:00.0 supports-statistics: yes supports-test: yes supports-eeprom-access: yes supports-register-dump: yes supports-priv-flags: yes $ devlink dev info pci/0000:3b:00.0 pci/0000:3b:00.0: driver ice serial number 00-01-ab-ff-ff-ca-05-68 versions: running: fw.mgmt 2.1.7 fw.mgmt.api 1.5 fw.mgmt.build 0x305d955f fw.undi 1.2581.0 fw.psid.api 0.80 fw.bundle_id 0x80002ec0 fw.app.name ICE OS Default Package fw.app 1.3.1.0 More pieces of information can be displayed, each version is kept separate instead of munged together, and each version has an identifier which comes with associated documentation. Signed-off-by: Jacob Keller <jacob.e.keller@intel.com> Acked-by: Jakub Kicinski <kuba@kernel.org> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
2020-03-12 09:58:17 +08:00
#include "ice_lib.h"
#include "ice_devlink.h"
ice: implement device flash update via devlink Use the newly added pldmfw library to implement device flash update for the Intel ice networking device driver. This support uses the devlink flash update interface. The main parts of the flash include the Option ROM, the netlist module, and the main NVM data. The PLDM firmware file contains modules for each of these components. Using the pldmfw library, the provided firmware file will be scanned for the three major components, "fw.undi" for the Option ROM, "fw.mgmt" for the main NVM module containing the primary device firmware, and "fw.netlist" containing the netlist module. The flash is separated into two banks, the active bank containing the running firmware, and the inactive bank which we use for update. Each module is updated in a staged process. First, the inactive bank is erased, preparing the device for update. Second, the contents of the component are copied to the inactive portion of the flash. After all components are updated, the driver signals the device to switch the active bank during the next EMP reset (which would usually occur during the next reboot). Although the firmware AdminQ interface does report an immediate status for each command, the NVM erase and NVM write commands receive status asynchronously. The driver must not continue writing until previous erase and write commands have finished. The real status of the NVM commands is returned over the receive AdminQ. Implement a simple interface that uses a wait queue so that the main update thread can sleep until the completion status is reported by firmware. For erasing the inactive banks, this can take quite a while in practice. To help visualize the process to the devlink application and other applications based on the devlink netlink interface, status is reported via the devlink_flash_update_status_notify. While we do report status after each 4k block when writing, there is no real status we can report during erasing. We simply must wait for the complete module erasure to finish. With this implementation, basic flash update for the ice hardware is supported. Signed-off-by: Jacob Keller <jacob.e.keller@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
2020-07-24 08:22:03 +08:00
#include "ice_fw_update.h"

static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
        u8 dsn[8];

        /* Copy the DSN into an array in Big Endian format */
        put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

        snprintf(buf, len, "%8phD", dsn);
}
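
/* The helpers below are the version "getters" wired into the
 * ice_devlink_versions[] table further down: each one formats a single
 * version string into buf (of size len) and returns 0 on success or a
 * negative error code, which ice_devlink_info_get() propagates to
 * userspace.
 */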

static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_hw *hw = &pf->hw;
        enum ice_status status;

        status = ice_read_pba_string(hw, (u8 *)buf, len);
        if (status)
                return -EIO;

        return 0;
}

static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
                 hw->fw_patch);

        return 0;
}

static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);

        return 0;
}

static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(buf, len, "0x%08x", hw->fw_build);

        return 0;
}

static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_orom_info *orom = &pf->hw.nvm.orom;

        snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);

        return 0;
}

static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_nvm_info *nvm = &pf->hw.nvm;

        snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);

        return 0;
}

static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_nvm_info *nvm = &pf->hw.nvm;

        snprintf(buf, len, "0x%08x", nvm->eetrack);

        return 0;
}

static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_hw *hw = &pf->hw;

        snprintf(buf, len, "%s", hw->active_pkg_name);

        return 0;
}

static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

        snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
                 pkg->draft);

        return 0;
}

static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
{
        snprintf(buf, len, "0x%08x", pf->hw.active_track_id);

        return 0;
}

static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;

        /* The netlist version fields are BCD formatted */
        snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
                 netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
                 netlist->cust_ver);

        return 0;
}

static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
{
        struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;

        snprintf(buf, len, "0x%08x", netlist->hash);

        return 0;
}

#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }

enum ice_version_type {
        ICE_VERSION_FIXED,
        ICE_VERSION_RUNNING,
        ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
        enum ice_version_type type;
        const char *key;
        int (*getter)(struct ice_pf *pf, char *buf, size_t len);
} ice_devlink_versions[] = {
        fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
        running("fw.mgmt.api", ice_info_fw_api),
        running("fw.mgmt.build", ice_info_fw_build),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
        running("fw.psid.api", ice_info_nvm_ver),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
        running("fw.app.name", ice_info_ddp_pkg_name),
        running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
        running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
        running("fw.netlist", ice_info_netlist_ver),
        running("fw.netlist.build", ice_info_netlist_build),
};
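
/* Nothing in the table reports ICE_VERSION_STORED yet, but the reporting
 * loop in ice_devlink_info_get() already handles it. A stored entry would
 * look roughly like the sketch below; the "stored" macro and the
 * pending-flash getter are hypothetical, not part of this file:
 *
 *      #define stored(key, getter) { ICE_VERSION_STORED, key, getter }
 *      stored(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_pending_fw_mgmt),
 */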

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);
        char buf[100];
        size_t i;
        int err;

        err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
                return err;
        }

        ice_info_get_dsn(pf, buf, sizeof(buf));

        err = devlink_info_serial_number_put(req, buf);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
                return err;
        }

        for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
                enum ice_version_type type = ice_devlink_versions[i].type;
                const char *key = ice_devlink_versions[i].key;

                err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
                if (err) {
                        NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
                        return err;
                }

                switch (type) {
                case ICE_VERSION_FIXED:
                        err = devlink_info_version_fixed_put(req, key, buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
                                return err;
                        }
                        break;
                case ICE_VERSION_RUNNING:
                        err = devlink_info_version_running_put(req, key, buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
                                return err;
                        }
                        break;
                case ICE_VERSION_STORED:
                        err = devlink_info_version_stored_put(req, key, buf);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
                                return err;
                        }
                        break;
                }
        }

        return 0;
}
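
/* For reference, the versions registered above surface to userspace via
 * "devlink dev info"; the output looks along these lines (values are
 * device specific):
 *
 *      $ devlink dev info pci/0000:3b:00.0
 *      pci/0000:3b:00.0:
 *        driver ice
 *        serial_number 00-01-ab-ff-ff-ca-05-68
 *        versions:
 *            running:
 *              fw.mgmt 2.1.7
 *              fw.mgmt.api 1.5
 *              fw.mgmt.build 0x305d955f
 *              fw.undi 1.2581.0
 *              fw.psid.api 0.80
 *              fw.bundle_id 0x80002ec0
 *              fw.app.name ICE OS Default Package
 *              fw.app 1.3.1.0
 */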

/**
 * ice_devlink_flash_update - Update firmware stored in flash on the device
 * @devlink: pointer to devlink associated with device to update
 * @params: flash update parameters
 * @extack: netlink extended ACK structure
 *
 * Perform a device flash update. The bulk of the update logic is contained
 * within the ice_flash_pldm_image function.
 *
 * Returns: zero on success, or an error code on failure.
 */
static int
ice_devlink_flash_update(struct devlink *devlink,
                         struct devlink_flash_update_params *params,
                         struct netlink_ext_ack *extack)
{
        struct ice_pf *pf = devlink_priv(devlink);
        struct device *dev = &pf->pdev->dev;
        struct ice_hw *hw = &pf->hw;
        const struct firmware *fw;
        u8 preservation;
        int err;

        if (!params->overwrite_mask) {
                /* preserve all settings and identifiers */
                preservation = ICE_AQC_NVM_PRESERVE_ALL;
        } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
                /* overwrite settings, but preserve the vital device identifiers */
                preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
        } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
                                              DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
                /* overwrite both settings and identifiers, preserve nothing */
                preservation = ICE_AQC_NVM_NO_PRESERVATION;
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
                return -EOPNOTSUPP;
        }

        if (!hw->dev_caps.common_cap.nvm_unified_update) {
                NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
                return -EOPNOTSUPP;
        }

        err = ice_check_for_pending_update(pf, NULL, extack);
        if (err)
                return err;

        err = request_firmware(&fw, params->file_name, dev);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
                return err;
        }

        dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);

        devlink_flash_update_begin_notify(devlink);
        devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
        err = ice_flash_pldm_image(pf, fw, preservation, extack);
        devlink_flash_update_end_notify(devlink);

        release_firmware(fw);

        return err;
}
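
/* A typical update is driven from userspace with the devlink CLI; the
 * file name below is illustrative (resolved relative to /lib/firmware):
 *
 *      $ devlink dev flash pci/0000:3b:00.0 file ice/fw_update.pldm
 *
 * Requesting overwrite of settings and/or identifiers maps onto the
 * reduced preservation levels selected at the top of this function.
 */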

static const struct devlink_ops ice_devlink_ops = {
        .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
        .info_get = ice_devlink_info_get,
        .flash_update = ice_devlink_flash_update,
};
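
/* Declaring DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK above lets the
 * devlink core validate flash requests before they reach the driver:
 * attributes the driver has not opted into (for example a per-component
 * update) are rejected in net/core/devlink.c with a precise extack
 * message pointing at the unsupported attribute.
 */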

static void ice_devlink_free(void *devlink_ptr)
{
        devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
        struct devlink *devlink;

        devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
        if (!devlink)
                return NULL;

        /* Add an action to teardown the devlink when unwinding the driver */
        if (devm_add_action(dev, ice_devlink_free, devlink)) {
                devlink_free(devlink);
                return NULL;
        }

        return devlink_priv(devlink);
}
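
/* Probe-side usage sketch (mirroring the caller in ice_main.c):
 *
 *      struct ice_pf *pf = ice_allocate_pf(&pdev->dev);
 *
 *      if (!pf)
 *              return -ENOMEM;
 *
 * Because teardown is registered as a devres action, the driver's unwind
 * paths do not need an explicit devlink_free().
 */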

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_register(struct ice_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);
        struct device *dev = ice_pf_to_dev(pf);
        int err;

        err = devlink_register(devlink, dev);
        if (err) {
                dev_err(dev, "devlink registration failed: %d\n", err);
                return err;
        }

        return 0;
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
        devlink_unregister(priv_to_devlink(pf));
}
/**
* ice_devlink_create_port - Create a devlink port for this VSI
* @vsi: the VSI to create a port for
*
* Create and register a devlink_port for this VSI.
*
* Return: zero on success or an error code on failure.
*/
int ice_devlink_create_port(struct ice_vsi *vsi)
{
	struct devlink_port_attrs attrs = {};
	struct ice_port_info *pi;
	struct devlink *devlink;
	struct device *dev;
	struct ice_pf *pf;
	int err;

	/* Currently we only create devlink_port instances for PF VSIs */
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	pf = vsi->back;
	devlink = priv_to_devlink(pf);
	dev = ice_pf_to_dev(pf);
	pi = pf->hw.port_info;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = pi->lport;
	devlink_port_attrs_set(&vsi->devlink_port, &attrs);

	err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "devlink_port_register failed: %d\n", err);
		return err;
	}

	vsi->devlink_port_registered = true;

	return 0;
}
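
/*
 * Illustrative pairing, with assumed call sites not shown in this file: the
 * PF VSI setup path is expected to create the port before registering the
 * netdev and then bind the two together:
 *
 *	err = ice_devlink_create_port(vsi);
 *	if (err)
 *		return err;
 *
 *	err = register_netdev(vsi->netdev);
 *	if (err)
 *		goto err_destroy_devlink_port;
 *
 *	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
 */
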
/**
* ice_devlink_destroy_port - Destroy the devlink_port for this VSI
* @vsi: the VSI to cleanup
*
* Unregisters the devlink_port structure associated with this VSI.
*/
void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
	if (!vsi->devlink_port_registered)
		return;

	devlink_port_type_clear(&vsi->devlink_port);
	devlink_port_unregister(&vsi->devlink_port);

	vsi->devlink_port_registered = false;
}
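
/*
 * The port type is cleared before unregistering so that the association
 * with the netdev set via devlink_port_type_eth_set() is dropped first; the
 * devlink_port_registered flag guards against a double unregister during
 * error unwinding.
 */
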
/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW request for
 * the nvm-flash devlink region. It captures a snapshot of the complete NVM
 * flash contents. This snapshot can later be viewed via the devlink-region
 * interface.
 *
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	void *nvm_data;
	u32 nvm_size;

	nvm_size = hw->nvm.flash_size;
	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		vfree(nvm_data);
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			nvm_size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		vfree(nvm_data);
		return -EIO;
	}

	ice_release_nvm(hw);

	*data = nvm_data;

	return 0;
}
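
/*
 * On success, ownership of the vzalloc()'d buffer passes to the devlink
 * core through *data; the core later releases it with the region's
 * .destructor (vfree below), which is why the buffer is freed here only on
 * the failure paths.
 */
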
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
* @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
 * This function is called in response to a DEVLINK_CMD_REGION_NEW request for
* the device-caps devlink region. It captures a snapshot of the device
* capabilities reported by firmware.
*
 * Return: zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	void *devcaps;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return -EIO;
	}

	*data = (u8 *)devcaps;

	return 0;
}
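
/*
 * ICE_AQ_MAX_BUF_LEN is the largest buffer a single AdminQ command can
 * carry, so the capabilities listing is sized to that upper bound rather
 * than to the (unknown in advance) size of the firmware response.
 */
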
static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};
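
/*
 * Both snapshot callbacks hand buffers allocated with vzalloc() to the
 * devlink core, so vfree is the matching .destructor in each of the ops
 * structures above.
 */
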
/**
* ice_devlink_init_regions - Initialize devlink regions
* @pf: the PF device structure
*
 * Create the devlink regions that allow dumping the contents of the device
 * flash memory and the device capabilities reported by firmware.
*/
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size;

	nvm_size = pf->hw.nvm.flash_size;
	pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
					       nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	pf->devcaps_region = devlink_region_create(devlink,
						   &ice_devcaps_region_ops, 10,
						   ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}
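
/*
 * Once the regions exist, snapshots can be requested from userspace with
 * the devlink tool; the PCI address below is only an example:
 *
 *	$ devlink region new pci/0000:3b:00.0/nvm-flash snapshot 1
 *	$ devlink region dump pci/0000:3b:00.0/nvm-flash snapshot 1
 */
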
/**
* ice_devlink_destroy_regions - Destroy devlink regions
* @pf: the PF device structure
*
* Remove previously created regions for this PF.
*/
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devlink_region_destroy(pf->nvm_region);

	if (pf->devcaps_region)
		devlink_region_destroy(pf->devcaps_region);
}
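
/*
 * Region creation is allowed to fail without failing probe (the pointers
 * are reset to NULL in ice_devlink_init_regions()), so each pointer is
 * checked before being handed to devlink_region_destroy().
 */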