scsi: elx: efct: Driver initialization routines

Add driver definitions for:

 - Emulex FC Target driver init, attach and hardware setup routines.

Link: https://lore.kernel.org/r/20210601235512.20104-19-jsmart2021@gmail.com
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Co-developed-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 4df84e8466 (parent 75a10a7a91)
Author: James Smart <jsmart2021@gmail.com>
Date: 2021-06-01 16:54:59 -07:00
Committed by: Martin K. Petersen
6 changed files with 2749 additions and 0 deletions


@@ -0,0 +1,786 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*/
#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"
#include "efct_scsi.h"
LIST_HEAD(efct_devices);
static int logmask;
module_param(logmask, int, 0444);
MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
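/*
 * Callbacks handed to the common libefc layer; it uses these to reach
 * back into this driver for mailbox, ELS/BLS and SCSI node events.
 */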
static struct libefc_function_template efct_libefc_templ = {
.issue_mbox_rqst = efct_issue_mbox_rqst,
.send_els = efct_els_hw_srrs_send,
.send_bls = efct_efc_bls_send,
.new_nport = efct_scsi_tgt_new_nport,
.del_nport = efct_scsi_tgt_del_nport,
.scsi_new_node = efct_scsi_new_initiator,
.scsi_del_node = efct_scsi_del_initiator,
.hw_seq_free = efct_efc_hw_sequence_free,
};
static int
efct_device_init(void)
{
int rc;
/* driver-wide init for target-server */
rc = efct_scsi_tgt_driver_init();
if (rc) {
pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
return rc;
}
rc = efct_scsi_reg_fc_transport();
if (rc) {
pr_err("failed to register to FC host\n");
return rc;
}
return 0;
}
static void
efct_device_shutdown(void)
{
efct_scsi_release_fc_transport();
efct_scsi_tgt_driver_exit();
}
static struct efct *
efct_device_alloc(u32 nid)
{
struct efct *efct = NULL;
efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
if (!efct)
return efct;
INIT_LIST_HEAD(&efct->list_entry);
list_add_tail(&efct->list_entry, &efct_devices);
return efct;
}
static void
efct_teardown_msix(struct efct *efct)
{
u32 i;
for (i = 0; i < efct->n_msix_vec; i++) {
free_irq(pci_irq_vector(efct->pci, i),
&efct->intr_context[i]);
}
pci_free_irq_vectors(efct->pci);
}
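/*
 * Allocate the libefc port object (struct efc), seed it with our callback
 * template and SLI handles, then register it via efcport_init().
 */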
static int
efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
{
struct efc *efc;
struct sli4 *sli;
int rc = 0;
efc = kzalloc(sizeof(*efc), GFP_KERNEL);
if (!efc)
return -ENOMEM;
efct->efcport = efc;
memcpy(&efc->tt, tt, sizeof(*tt));
efc->base = efct;
efc->pci = efct->pci;
efc->def_wwnn = efct_get_wwnn(&efct->hw);
efc->def_wwpn = efct_get_wwpn(&efct->hw);
efc->enable_tgt = 1;
efc->log_level = EFC_LOG_LIB;
sli = &efct->hw.sli;
efc->max_xfer_size = sli->sge_supported_length *
sli_get_max_sgl(&efct->hw.sli);
efc->sli = sli;
efc->fcfi = efct->hw.fcf_indicator;
rc = efcport_init(efc);
if (rc)
efc_log_err(efc, "efcport_init failed\n");
return rc;
}
static int efct_request_firmware_update(struct efct *efct);
static const char *
efct_pci_model(u16 device)
{
switch (device) {
case EFCT_DEVICE_LANCER_G6: return "LPE31004";
case EFCT_DEVICE_LANCER_G7: return "LPE36000";
default: return "unknown";
}
}
static int
efct_device_attach(struct efct *efct)
{
int rc = 0;
u32 i = 0;
if (efct->attached) {
efc_log_err(efct, "Device is already attached\n");
return -EIO;
}
snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
efct->instance_index);
efct->logmask = logmask;
efct->filter_def = EFCT_DEFAULT_FILTER;
efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
efct->model = efct_pci_model(efct->pci->device);
efct->efct_req_fw_upgrade = true;
/* Allocate transport object and bring online */
efct->xport = efct_xport_alloc(efct);
if (!efct->xport) {
efc_log_err(efct, "failed to allocate transport object\n");
rc = -ENOMEM;
goto out;
}
rc = efct_xport_attach(efct->xport);
if (rc) {
efc_log_err(efct, "failed to attach transport object\n");
goto xport_out;
}
rc = efct_xport_initialize(efct->xport);
if (rc) {
efc_log_err(efct, "failed to initialize transport object\n");
goto xport_out;
}
rc = efct_efclib_config(efct, &efct_libefc_templ);
if (rc) {
efc_log_err(efct, "failed to init efclib\n");
goto efclib_out;
}
for (i = 0; i < efct->n_msix_vec; i++) {
efc_log_debug(efct, "irq %d enabled\n", i);
enable_irq(pci_irq_vector(efct->pci, i));
}
efct->attached = true;
if (efct->efct_req_fw_upgrade)
efct_request_firmware_update(efct);
return rc;
efclib_out:
efct_xport_detach(efct->xport);
xport_out:
efct_xport_free(efct->xport);
efct->xport = NULL;
out:
return rc;
}
static int
efct_device_detach(struct efct *efct)
{
int i;
if (!efct || !efct->attached) {
pr_err("Device is not attached\n");
return -EIO;
}
if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
efc_log_err(efct, "Transport Shutdown timed out\n");
for (i = 0; i < efct->n_msix_vec; i++)
disable_irq(pci_irq_vector(efct->pci, i));
efct_xport_detach(efct->xport);
efct_xport_free(efct->xport);
efct->xport = NULL;
efcport_destroy(efct->efcport);
kfree(efct->efcport);
efct->attached = false;
return 0;
}
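/* Completion callback for each firmware chunk written below; wakes the waiter */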
static void
efct_fw_write_cb(int status, u32 actual_write_length,
u32 change_status, void *arg)
{
struct efct_fw_write_result *result = arg;
result->status = status;
result->actual_xfer = actual_write_length;
result->change_status = change_status;
complete(&result->done);
}
static int
efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
u8 *change_status)
{
int rc = 0;
u32 bytes_left;
u32 xfer_size;
u32 offset;
struct efc_dma dma;
int last = 0;
struct efct_fw_write_result result;
init_completion(&result.done);
bytes_left = buf_len;
offset = 0;
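/* Stage the image through one reusable DMA-coherent bounce buffer */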
dma.size = FW_WRITE_BUFSIZE;
dma.virt = dma_alloc_coherent(&efct->pci->dev,
dma.size, &dma.phys, GFP_DMA);
if (!dma.virt)
return -ENOMEM;
while (bytes_left > 0) {
if (bytes_left > FW_WRITE_BUFSIZE)
xfer_size = FW_WRITE_BUFSIZE;
else
xfer_size = bytes_left;
memcpy(dma.virt, buf + offset, xfer_size);
if (bytes_left == xfer_size)
last = 1;
efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
last, efct_fw_write_cb, &result);
if (wait_for_completion_interruptible(&result.done) != 0) {
rc = -ENXIO;
break;
}
if (result.actual_xfer == 0 || result.status != 0) {
rc = -EFAULT;
break;
}
if (last)
*change_status = result.change_status;
bytes_left -= result.actual_xfer;
offset += result.actual_xfer;
}
dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
return rc;
}
static int
efct_fw_reset(struct efct *efct)
{
/*
* Firmware reset to activate the new firmware.
* Function 0 will update and load the new firmware
* during attach.
*/
if (timer_pending(&efct->xport->stats_timer))
del_timer(&efct->xport->stats_timer);
if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
efc_log_info(efct, "failed to reset firmware\n");
return -EIO;
}
efc_log_info(efct, "successfully reset firmware.Now resetting port\n");
efct_device_detach(efct);
return efct_device_attach(efct);
}
static int
efct_request_firmware_update(struct efct *efct)
{
int rc = 0;
char file_name[256];
u8 fw_change_status = 0;
const struct firmware *fw;
struct efct_hw_grp_hdr *fw_image;
snprintf(file_name, sizeof(file_name), "%s.grp", efct->model);
rc = request_firmware(&fw, file_name, &efct->pci->dev);
if (rc) {
efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
return rc;
}
fw_image = (struct efct_hw_grp_hdr *)fw->data;
if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
strnlen(fw_image->revision, 16))) {
efc_log_debug(efct,
"Skip update. Firmware is already up to date.\n");
goto exit;
}
efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
efct->hw.sli.fw_name[0], fw_image->revision);
rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
if (rc) {
efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
goto exit;
}
efc_log_info(efct, "Firmware updated successfully\n");
switch (fw_change_status) {
case 0x00:
efc_log_info(efct, "New firmware is active.\n");
break;
case 0x01:
efc_log_info(efct,
"System reboot needed to activate the new firmware\n");
break;
case 0x02:
case 0x03:
efc_log_info(efct,
"firmware reset to activate the new firmware\n");
efct_fw_reset(efct);
break;
default:
efc_log_info(efct, "Unexected value change_status:%d\n",
fw_change_status);
break;
}
exit:
release_firmware(fw);
return rc;
}
static void
efct_device_free(struct efct *efct)
{
if (efct) {
list_del(&efct->list_entry);
kfree(efct);
}
}
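/* Ask the HW layer how many event queues (and hence MSI-X vectors) it needs */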
static int
efct_device_interrupts_required(struct efct *efct)
{
int rc;
rc = efct_hw_setup(&efct->hw, efct, efct->pci);
if (rc < 0)
return rc;
return efct->hw.config.n_eq;
}
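/* Threaded half of the MSI-X handler: drains completions for one EQ vector */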
static irqreturn_t
efct_intr_thread(int irq, void *handle)
{
struct efct_intr_context *intr_ctx = handle;
struct efct *efct = intr_ctx->efct;
efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
return IRQ_HANDLED;
}
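/* Hard-IRQ half: do no work in interrupt context, just wake the IRQ thread */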
static irqreturn_t
efct_intr_msix(int irq, void *handle)
{
return IRQ_WAKE_THREAD;
}
static int
efct_setup_msix(struct efct *efct, u32 num_intrs)
{
int rc = 0, i;
if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
dev_err(&efct->pci->dev,
"%s : MSI-X not available\n", __func__);
return -EIO;
}
efct->n_msix_vec = num_intrs;
rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (rc < 0) {
dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
return rc;
}
for (i = 0; i < num_intrs; i++) {
struct efct_intr_context *intr_ctx = NULL;
intr_ctx = &efct->intr_context[i];
intr_ctx->efct = efct;
intr_ctx->index = i;
rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
efct_intr_msix, efct_intr_thread, 0,
EFCT_DRIVER_NAME, intr_ctx);
if (rc) {
dev_err(&efct->pci->dev,
"Failed to register %d vector: %d\n", i, rc);
goto out;
}
}
return rc;
out:
while (--i >= 0)
free_irq(pci_irq_vector(efct->pci, i),
&efct->intr_context[i]);
pci_free_irq_vectors(efct->pci);
return rc;
}
static const struct pci_device_id efct_pci_table[] = {
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
{} /* terminate list */
};
static int
efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct efct *efct = NULL;
int rc;
u32 i, r;
int num_interrupts = 0;
int nid;
dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
rc = pci_enable_device_mem(pdev);
if (rc)
return rc;
pci_set_master(pdev);
rc = pci_set_mwi(pdev);
if (rc) {
dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
goto mwi_out;
}
rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
if (rc) {
dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
goto req_regions_out;
}
/* Fetch the NUMA node id for this device */
nid = dev_to_node(&pdev->dev);
if (nid < 0) {
dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
nid = 0;
}
/* Allocate efct */
efct = efct_device_alloc(nid);
if (!efct) {
dev_err(&pdev->dev, "Failed to allocate efct\n");
rc = -ENOMEM;
goto alloc_out;
}
efct->pci = pdev;
efct->numa_node = nid;
/* Map all memory BARs */
for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
efct->reg[r] = ioremap(pci_resource_start(pdev, i),
pci_resource_len(pdev, i));
r++;
}
/*
* If the 64-bit attribute is set, both this BAR and the
* next form the complete address. Skip processing the
* next BAR.
*/
if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
i++;
}
pci_set_drvdata(pdev, efct);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
rc = -1;
goto dma_mask_out;
}
}
num_interrupts = efct_device_interrupts_required(efct);
if (num_interrupts < 0) {
efc_log_err(efct, "efct_device_interrupts_required failed\n");
rc = -1;
goto dma_mask_out;
}
/*
* Initialize MSIX interrupts, note,
* efct_setup_msix() enables the interrupt
*/
rc = efct_setup_msix(efct, num_interrupts);
if (rc) {
dev_err(&pdev->dev, "Can't setup msix\n");
goto dma_mask_out;
}
/* Disable interrupt for now */
for (i = 0; i < efct->n_msix_vec; i++) {
efc_log_debug(efct, "irq %d disabled\n", i);
disable_irq(pci_irq_vector(efct->pci, i));
}
rc = efct_device_attach(efct);
if (rc)
goto attach_out;
return 0;
attach_out:
efct_teardown_msix(efct);
dma_mask_out:
pci_set_drvdata(pdev, NULL);
for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
if (efct->reg[i])
iounmap(efct->reg[i]);
}
efct_device_free(efct);
alloc_out:
pci_release_regions(pdev);
req_regions_out:
pci_clear_mwi(pdev);
mwi_out:
pci_disable_device(pdev);
return rc;
}
static void
efct_pci_remove(struct pci_dev *pdev)
{
struct efct *efct = pci_get_drvdata(pdev);
u32 i;
if (!efct)
return;
efct_device_detach(efct);
efct_teardown_msix(efct);
for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
if (efct->reg[i])
iounmap(efct->reg[i]);
}
pci_set_drvdata(pdev, NULL);
efct_device_free(efct);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void
efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
{
if (efct) {
efc_log_debug(efct,
"PCI channel disable preparing for reset\n");
efct_device_detach(efct);
/* Disable interrupt and pci device */
efct_teardown_msix(efct);
}
pci_disable_device(pdev);
}
static void
efct_device_prep_for_recover(struct efct *efct)
{
if (efct) {
efc_log_debug(efct, "PCI channel preparing for recovery\n");
efct_hw_io_abort_all(&efct->hw);
}
}
/**
* efct_pci_io_error_detected - method for handling PCI I/O error
* @pdev: pointer to PCI device.
* @state: the current PCI connection state.
*
* This routine is registered to the PCI subsystem for error handling. This
* function is called by the PCI subsystem after a PCI bus error affecting
* this device has been detected. When this routine is invoked, it dispatches
* device error detected handling routine, which will perform the proper
* error detected operation.
*
* Return codes
* PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
* PCI_ERS_RESULT_DISCONNECT - device could not be recovered
*/
static pci_ers_result_t
efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct efct *efct = pci_get_drvdata(pdev);
pci_ers_result_t rc;
switch (state) {
case pci_channel_io_normal:
efct_device_prep_for_recover(efct);
rc = PCI_ERS_RESULT_CAN_RECOVER;
break;
case pci_channel_io_frozen:
efct_device_prep_for_reset(efct, pdev);
rc = PCI_ERS_RESULT_NEED_RESET;
break;
case pci_channel_io_perm_failure:
efct_device_detach(efct);
rc = PCI_ERS_RESULT_DISCONNECT;
break;
default:
efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
efct_device_prep_for_reset(efct, pdev);
rc = PCI_ERS_RESULT_NEED_RESET;
break;
}
return rc;
}
static pci_ers_result_t
efct_pci_io_slot_reset(struct pci_dev *pdev)
{
int rc;
struct efct *efct = pci_get_drvdata(pdev);
rc = pci_enable_device_mem(pdev);
if (rc) {
efc_log_err(efct, "failed to enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
*/
pci_save_state(pdev);
pci_set_master(pdev);
rc = efct_setup_msix(efct, efct->n_msix_vec);
if (rc)
efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
rc);
/* Perform device reset */
efct_device_detach(efct);
/* Bring device back online */
efct_device_attach(efct);
return PCI_ERS_RESULT_RECOVERED;
}
static void
efct_pci_io_resume(struct pci_dev *pdev)
{
struct efct *efct = pci_get_drvdata(pdev);
/* Perform device reset */
efct_device_detach(efct);
/* Bring device back online */
efct_device_attach(efct);
}
MODULE_DEVICE_TABLE(pci, efct_pci_table);
static struct pci_error_handlers efct_pci_err_handler = {
.error_detected = efct_pci_io_error_detected,
.slot_reset = efct_pci_io_slot_reset,
.resume = efct_pci_io_resume,
};
static struct pci_driver efct_pci_driver = {
.name = EFCT_DRIVER_NAME,
.id_table = efct_pci_table,
.probe = efct_pci_probe,
.remove = efct_pci_remove,
.err_handler = &efct_pci_err_handler,
};
static int __init efct_init(void)
{
int rc;
rc = efct_device_init();
if (rc) {
pr_err("efct_device_init failed rc=%d\n", rc);
return rc;
}
rc = pci_register_driver(&efct_pci_driver);
if (rc) {
pr_err("pci_register_driver failed rc=%d\n", rc);
efct_device_shutdown();
}
return rc;
}
static void __exit efct_exit(void)
{
pci_unregister_driver(&efct_pci_driver);
efct_device_shutdown();
}
module_init(efct_init);
module_exit(efct_exit);
MODULE_VERSION(EFCT_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");


@@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2021 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*/
#if !defined(__EFCT_DRIVER_H__)
#define __EFCT_DRIVER_H__
/***************************************************************************
* OS specific includes
*/
#include <stdarg.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include "../include/efc_common.h"
#include "../libefc/efclib.h"
#include "efct_hw.h"
#include "efct_io.h"
#include "efct_xport.h"
#define EFCT_DRIVER_NAME "efct"
#define EFCT_DRIVER_VERSION "1.0.0.0"
/* EFCT_DEFAULT_FILTER-
* MRQ filter to segregate the IO flow.
*/
#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0"
/* EFCT_OS_MAX_ISR_TIME_MSEC -
* maximum time driver code should spend in an interrupt
* or kernel thread context without yielding
*/
#define EFCT_OS_MAX_ISR_TIME_MSEC 1000
#define EFCT_FC_MAX_SGL 64
#define EFCT_FC_DIF_SEED 0
/* Watermark */
#define EFCT_WATERMARK_HIGH_PCT 90
#define EFCT_WATERMARK_LOW_PCT 80
#define EFCT_IO_WATERMARK_PER_INITIATOR 8
#define EFCT_PCI_MAX_REGS 6
#define MAX_PCI_INTERRUPTS 16
struct efct_intr_context {
struct efct *efct;
u32 index;
};
struct efct {
struct pci_dev *pci;
void __iomem *reg[EFCT_PCI_MAX_REGS];
u32 n_msix_vec;
bool attached;
bool soft_wwn_enable;
u8 efct_req_fw_upgrade;
struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS];
u32 numa_node;
char name[EFC_NAME_LENGTH];
u32 instance_index;
struct list_head list_entry;
struct efct_scsi_tgt tgt_efct;
struct efct_xport *xport;
struct efc *efcport;
struct Scsi_Host *shost;
int logmask;
u32 max_isr_time_msec;
const char *desc;
const char *model;
struct efct_hw hw;
u32 rq_selection_policy;
char *filter_def;
int topology;
/* Look up for target node */
struct xarray lookup;
/*
* Target IO timer value:
* Zero: target command timeout disabled.
* Non-zero: Timeout value, in seconds, for target commands
*/
u32 target_io_timer_sec;
int speed;
struct dentry *sess_debugfs_dir;
};
#define FW_WRITE_BUFSIZE (64 * 1024)
struct efct_fw_write_result {
struct completion done;
int status;
u32 actual_xfer;
u32 change_status;
};
extern struct list_head efct_devices;
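/*
 * Illustrative sketch, not part of the original patch: each struct efct
 * is linked onto the global efct_devices list through its list_entry
 * member, so an instance can be located with the standard list iterator.
 * The helper name below is hypothetical.
 */
static inline struct efct *efct_find_device(u32 instance_index)
{
    struct efct *efct;

    list_for_each_entry(efct, &efct_devices, list_entry)
        if (efct->instance_index == instance_index)
            return efct;

    return NULL;
}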
#endif /* __EFCT_DRIVER_H__ */

File diff suppressed because it is too large.


@@ -583,4 +583,19 @@ struct efct_hw_grp_hdr {
u8 revision[32];
};
static inline int
efct_hw_get_link_speed(struct efct_hw *hw)
{
return hw->link.speed;
}
int
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
int efct_hw_init(struct efct_hw *hw);
int
efct_hw_parse_filter(struct efct_hw *hw, void *value);
uint64_t
efct_get_wwnn(struct efct_hw *hw);
uint64_t
efct_get_wwpn(struct efct_hw *hw);
#endif /* __EFCT_H__ */


@@ -0,0 +1,500 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*/
#include "efct_driver.h"
#include "efct_unsol.h"
static struct dentry *efct_debugfs_root;
static atomic_t efct_debugfs_count;
static struct scsi_host_template efct_template = {
.module = THIS_MODULE,
.name = EFCT_DRIVER_NAME,
.supported_mode = MODE_TARGET,
};
/* globals */
static struct fc_function_template efct_xport_functions;
static struct fc_function_template efct_vport_functions;
static struct scsi_transport_template *efct_xport_fc_tt;
static struct scsi_transport_template *efct_vport_fc_tt;
struct efct_xport *
efct_xport_alloc(struct efct *efct)
{
struct efct_xport *xport;
xport = kzalloc(sizeof(*xport), GFP_KERNEL);
if (!xport)
return xport;
xport->efct = efct;
return xport;
}
static int
efct_xport_init_debugfs(struct efct *efct)
{
/* Setup efct debugfs root directory */
if (!efct_debugfs_root) {
efct_debugfs_root = debugfs_create_dir("efct", NULL);
atomic_set(&efct_debugfs_count, 0);
if (!efct_debugfs_root) {
efc_log_err(efct, "failed to create debugfs entry\n");
goto debugfs_fail;
}
}
/* Create a directory for sessions in root */
if (!efct->sess_debugfs_dir) {
efct->sess_debugfs_dir = debugfs_create_dir("sessions", NULL);
if (!efct->sess_debugfs_dir) {
efc_log_err(efct,
"failed to create debugfs entry for sessions\n");
goto debugfs_fail;
}
atomic_inc(&efct_debugfs_count);
}
return 0;
debugfs_fail:
return -EIO;
}
static void efct_xport_delete_debugfs(struct efct *efct)
{
/* Remove session debugfs directory */
debugfs_remove(efct->sess_debugfs_dir);
efct->sess_debugfs_dir = NULL;
atomic_dec(&efct_debugfs_count);
if (atomic_read(&efct_debugfs_count) == 0) {
/* remove root debugfs directory */
debugfs_remove(efct_debugfs_root);
efct_debugfs_root = NULL;
}
}
int
efct_xport_attach(struct efct_xport *xport)
{
struct efct *efct = xport->efct;
int rc;
rc = efct_hw_setup(&efct->hw, efct, efct->pci);
if (rc) {
efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc);
return rc;
}
efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def);
xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl);
if (!xport->io_pool) {
efc_log_err(efct, "Can't allocate IO pool\n");
return -ENOMEM;
}
return 0;
}
static void
efct_xport_link_stats_cb(int status, u32 num_counters,
struct efct_hw_link_stat_counts *counters, void *arg)
{
union efct_xport_stats_u *result = arg;
result->stats.link_stats.link_failure_error_count =
counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
result->stats.link_stats.loss_of_sync_error_count =
counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
result->stats.link_stats.primitive_sequence_error_count =
counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
result->stats.link_stats.invalid_transmission_word_error_count =
counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
result->stats.link_stats.crc_error_count =
counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
complete(&result->stats.done);
}
static void
efct_xport_host_stats_cb(int status, u32 num_counters,
struct efct_hw_host_stat_counts *counters, void *arg)
{
union efct_xport_stats_u *result = arg;
result->stats.host_stats.transmit_kbyte_count =
counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
result->stats.host_stats.receive_kbyte_count =
counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
result->stats.host_stats.transmit_frame_count =
counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
result->stats.host_stats.receive_frame_count =
counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
complete(&result->stats.done);
}
static void
efct_xport_async_link_stats_cb(int status, u32 num_counters,
struct efct_hw_link_stat_counts *counters,
void *arg)
{
union efct_xport_stats_u *result = arg;
result->stats.link_stats.link_failure_error_count =
counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
result->stats.link_stats.loss_of_sync_error_count =
counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
result->stats.link_stats.primitive_sequence_error_count =
counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
result->stats.link_stats.invalid_transmission_word_error_count =
counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
result->stats.link_stats.crc_error_count =
counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
}
static void
efct_xport_async_host_stats_cb(int status, u32 num_counters,
struct efct_hw_host_stat_counts *counters,
void *arg)
{
union efct_xport_stats_u *result = arg;
result->stats.host_stats.transmit_kbyte_count =
counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
result->stats.host_stats.receive_kbyte_count =
counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
result->stats.host_stats.transmit_frame_count =
counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
result->stats.host_stats.receive_frame_count =
counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
}
static void
efct_xport_config_stats_timer(struct efct *efct);
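/* Stats timer callback: kick another async refresh and re-arm the timer */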
static void
efct_xport_stats_timer_cb(struct timer_list *t)
{
struct efct_xport *xport = from_timer(xport, t, stats_timer);
struct efct *efct = xport->efct;
efct_xport_config_stats_timer(efct);
}
static void
efct_xport_config_stats_timer(struct efct *efct)
{
u32 timeout = 3 * 1000;
struct efct_xport *xport = NULL;
if (!efct) {
pr_err("%s: failed to locate EFCT device\n", __func__);
return;
}
xport = efct->xport;
efct_hw_get_link_stats(&efct->hw, 0, 0, 0,
efct_xport_async_link_stats_cb,
&xport->fc_xport_stats);
efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb,
&xport->fc_xport_stats);
timer_setup(&xport->stats_timer,
&efct_xport_stats_timer_cb, 0);
mod_timer(&xport->stats_timer,
jiffies + msecs_to_jiffies(timeout));
}
int
efct_xport_initialize(struct efct_xport *xport)
{
struct efct *efct = xport->efct;
int rc = 0;
/* Initialize io lists */
spin_lock_init(&xport->io_pending_lock);
INIT_LIST_HEAD(&xport->io_pending_list);
atomic_set(&xport->io_active_count, 0);
atomic_set(&xport->io_pending_count, 0);
atomic_set(&xport->io_total_free, 0);
atomic_set(&xport->io_total_pending, 0);
atomic_set(&xport->io_alloc_failed_count, 0);
atomic_set(&xport->io_pending_recursing, 0);
rc = efct_hw_init(&efct->hw);
if (rc) {
efc_log_err(efct, "efct_hw_init failure\n");
goto out;
}
rc = efct_scsi_tgt_new_device(efct);
if (rc) {
efc_log_err(efct, "failed to initialize target\n");
goto hw_init_out;
}
rc = efct_scsi_new_device(efct);
if (rc) {
efc_log_err(efct, "failed to initialize initiator\n");
goto tgt_dev_out;
}
/* Get FC link and host statistics periodically */
efct_xport_config_stats_timer(efct);
efct_xport_init_debugfs(efct);
return rc;
tgt_dev_out:
efct_scsi_tgt_del_device(efct);
hw_init_out:
efct_hw_teardown(&efct->hw);
out:
return rc;
}
int
efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
union efct_xport_stats_u *result)
{
int rc = 0;
struct efct *efct = NULL;
union efct_xport_stats_u value;
efct = xport->efct;
switch (cmd) {
case EFCT_XPORT_CONFIG_PORT_STATUS:
if (xport->configured_link_state == 0) {
/*
* Initial state is offline. configured_link_state is
* set to online explicitly when port is brought online
*/
xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE;
}
result->value = xport->configured_link_state;
break;
case EFCT_XPORT_PORT_STATUS:
/* Determine port status based on link speed. */
value.value = efct_hw_get_link_speed(&efct->hw);
if (value.value == 0)
result->value = EFCT_XPORT_PORT_OFFLINE;
else
result->value = EFCT_XPORT_PORT_ONLINE;
break;
case EFCT_XPORT_LINK_SPEED:
result->value = efct_hw_get_link_speed(&efct->hw);
break;
case EFCT_XPORT_LINK_STATISTICS:
memcpy((void *)result, &efct->xport->fc_xport_stats,
sizeof(union efct_xport_stats_u));
break;
case EFCT_XPORT_LINK_STAT_RESET: {
/* Create a completion to synchronize the stat reset process */
init_completion(&result->stats.done);
/* First reset the link stats */
rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1,
efct_xport_link_stats_cb, result);
if (rc)
break;
/* Wait for completion to be signaled when the cmd completes */
if (wait_for_completion_interruptible(&result->stats.done)) {
/* Undefined failure */
efc_log_debug(efct, "sem wait failed\n");
rc = -EIO;
break;
}
/* Next reset the host stats */
rc = efct_hw_get_host_stats(&efct->hw, 1,
efct_xport_host_stats_cb, result);
if (rc)
break;
/* Wait for completion to be signaled when the cmd completes */
if (wait_for_completion_interruptible(&result->stats.done)) {
/* Undefined failure */
efc_log_debug(efct, "sem wait failed\n");
rc = -EIO;
break;
}
break;
}
default:
rc = -EIO;
break;
}
return rc;
}
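/* Translate SLI link-module-type bits into FC transport port-speed flags */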
static int
efct_get_link_supported_speeds(struct efct *efct)
{
u32 supported_speeds = 0;
u32 link_module_type, i;
struct {
u32 lmt_speed;
u32 speed;
} supported_speed_list[] = {
{SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT},
{SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT},
{SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT},
{SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT},
{SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT},
{SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT},
{SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT},
{SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT},
};
link_module_type = sli_get_lmt(&efct->hw.sli);
/* populate link supported speeds */
for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) {
if (link_module_type & supported_speed_list[i].lmt_speed)
supported_speeds |= supported_speed_list[i].speed;
}
return supported_speeds;
}
int
efct_scsi_new_device(struct efct *efct)
{
struct Scsi_Host *shost = NULL;
int error = 0;
struct efct_vport *vport = NULL;
shost = scsi_host_alloc(&efct_template, sizeof(*vport));
if (!shost) {
efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
return -ENOMEM;
}
/* save shost to initiator-client context */
efct->shost = shost;
/* save efct information to shost LLD-specific space */
vport = (struct efct_vport *)shost->hostdata;
vport->efct = efct;
/*
* Set initial can_queue value to the max SCSI IOs. This is the maximum
* global queue depth (as opposed to the per-LUN queue depth,
* .cmd_per_lun). This may need to be adjusted for I+T mode.
*/
shost->can_queue = efct->hw.config.n_io;
shost->max_cmd_len = 16; /* 16-byte CDBs */
shost->max_id = 0xffff;
shost->max_lun = 0xffffffff;
/*
* can only accept (from mid-layer) as many SGEs as we've
* pre-registered
*/
shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
/* attach FC Transport template to shost */
shost->transportt = efct_xport_fc_tt;
efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt);
/* get pci_dev structure and add host to SCSI ML */
error = scsi_add_host_with_dma(shost, &efct->pci->dev,
&efct->pci->dev);
if (error) {
efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
/* drop the reference taken by scsi_host_alloc() */
scsi_host_put(shost);
return -EIO;
}
/* Set symbolic name for host port */
snprintf(fc_host_symbolic_name(shost),
sizeof(fc_host_symbolic_name(shost)),
"Emulex %s FV%s DV%s", efct->model,
efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION);
/* Set host port supported classes */
fc_host_supported_classes(shost) = FC_COS_CLASS3;
fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
fc_host_node_name(shost) = efct_get_wwnn(&efct->hw);
fc_host_port_name(shost) = efct_get_wwpn(&efct->hw);
fc_host_max_npiv_vports(shost) = 128;
return 0;
}
struct scsi_transport_template *
efct_attach_fc_transport(void)
{
struct scsi_transport_template *efct_fc_template = NULL;
efct_fc_template = fc_attach_transport(&efct_xport_functions);
if (!efct_fc_template)
pr_err("failed to attach EFCT with fc transport\n");
return efct_fc_template;
}
struct scsi_transport_template *
efct_attach_vport_fc_transport(void)
{
struct scsi_transport_template *efct_fc_template = NULL;
efct_fc_template = fc_attach_transport(&efct_vport_functions);
if (!efct_fc_template)
pr_err("failed to attach EFCT with fc transport\n");
return efct_fc_template;
}
int
efct_scsi_reg_fc_transport(void)
{
/* attach to appropriate scsi_tranport_* module */
efct_xport_fc_tt = efct_attach_fc_transport();
if (!efct_xport_fc_tt) {
pr_err("%s: failed to attach to scsi_transport_*", __func__);
return -EIO;
}
efct_vport_fc_tt = efct_attach_vport_fc_transport();
if (!efct_vport_fc_tt) {
pr_err("%s: failed to attach to scsi_transport_*", __func__);
efct_release_fc_transport(efct_xport_fc_tt);
efct_xport_fc_tt = NULL;
return -EIO;
}
return 0;
}
void
efct_scsi_release_fc_transport(void)
{
/* detach from scsi_transport_* */
efct_release_fc_transport(efct_xport_fc_tt);
efct_xport_fc_tt = NULL;
if (efct_vport_fc_tt)
efct_release_fc_transport(efct_vport_fc_tt);
efct_vport_fc_tt = NULL;
}


@@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2021 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*/
#if !defined(__EFCT_XPORT_H__)
#define __EFCT_XPORT_H__
enum efct_xport_ctrl {
EFCT_XPORT_PORT_ONLINE = 1,
EFCT_XPORT_PORT_OFFLINE,
EFCT_XPORT_SHUTDOWN,
EFCT_XPORT_POST_NODE_EVENT,
EFCT_XPORT_WWNN_SET,
EFCT_XPORT_WWPN_SET,
};
enum efct_xport_status {
EFCT_XPORT_PORT_STATUS,
EFCT_XPORT_CONFIG_PORT_STATUS,
EFCT_XPORT_LINK_SPEED,
EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
EFCT_XPORT_LINK_STATISTICS,
EFCT_XPORT_LINK_STAT_RESET,
EFCT_XPORT_IS_QUIESCED
};
struct efct_xport_link_stats {
bool rec;
bool gec;
bool w02of;
bool w03of;
bool w04of;
bool w05of;
bool w06of;
bool w07of;
bool w08of;
bool w09of;
bool w10of;
bool w11of;
bool w12of;
bool w13of;
bool w14of;
bool w15of;
bool w16of;
bool w17of;
bool w18of;
bool w19of;
bool w20of;
bool w21of;
bool clrc;
bool clof1;
u32 link_failure_error_count;
u32 loss_of_sync_error_count;
u32 loss_of_signal_error_count;
u32 primitive_sequence_error_count;
u32 invalid_transmission_word_error_count;
u32 crc_error_count;
u32 primitive_sequence_event_timeout_count;
u32 elastic_buffer_overrun_error_count;
u32 arbitration_fc_al_timeout_count;
u32 advertised_receive_buffer_to_buffer_credit;
u32 current_receive_buffer_to_buffer_credit;
u32 advertised_transmit_buffer_to_buffer_credit;
u32 current_transmit_buffer_to_buffer_credit;
u32 received_eofa_count;
u32 received_eofdti_count;
u32 received_eofni_count;
u32 received_soff_count;
u32 received_dropped_no_aer_count;
u32 received_dropped_no_available_rpi_resources_count;
u32 received_dropped_no_available_xri_resources_count;
};
struct efct_xport_host_stats {
bool cc;
u32 transmit_kbyte_count;
u32 receive_kbyte_count;
u32 transmit_frame_count;
u32 receive_frame_count;
u32 transmit_sequence_count;
u32 receive_sequence_count;
u32 total_exchanges_originator;
u32 total_exchanges_responder;
u32 receive_p_bsy_count;
u32 receive_f_bsy_count;
u32 dropped_frames_due_to_no_rq_buffer_count;
u32 empty_rq_timeout_count;
u32 dropped_frames_due_to_no_xri_count;
u32 empty_xri_pool_count;
};
struct efct_xport_host_statistics {
struct completion done;
struct efct_xport_link_stats link_stats;
struct efct_xport_host_stats host_stats;
};
union efct_xport_stats_u {
u32 value;
struct efct_xport_host_statistics stats;
};
struct efct_xport_fcp_stats {
u64 input_bytes;
u64 output_bytes;
u64 input_requests;
u64 output_requests;
u64 control_requests;
};
struct efct_xport {
struct efct *efct;
/* wwpn requested by user for primary nport */
u64 req_wwpn;
/* wwnn requested by user for primary nport */
u64 req_wwnn;
/* Nodes */
/* number of allocated nodes */
u32 nodes_count;
/* used to track how often IO pool is empty */
atomic_t io_alloc_failed_count;
/* array of pointers to nodes */
struct efc_node **nodes;
/* Io pool and counts */
/* pointer to IO pool */
struct efct_io_pool *io_pool;
/* lock for io_pending_list */
spinlock_t io_pending_lock;
/* list of IOs waiting for HW resources
* lock: xport->io_pending_lock
* link: efct_io_s->io_pending_link
*/
struct list_head io_pending_list;
/* total number of IOs allocated */
atomic_t io_total_alloc;
/* total number of IOs freed */
atomic_t io_total_free;
/* total number of IOs that were pended */
atomic_t io_total_pending;
/* count of active IOS */
atomic_t io_active_count;
/* count of pending IOS */
atomic_t io_pending_count;
/* non-zero if efct_scsi_check_pending is executing */
atomic_t io_pending_recursing;
/* Port */
/* requested link state */
u32 configured_link_state;
/* Timer for Statistics */
struct timer_list stats_timer;
union efct_xport_stats_u fc_xport_stats;
struct efct_xport_fcp_stats fcp_stats;
};
struct efct_rport_data {
struct efc_node *node;
};
struct efct_xport *
efct_xport_alloc(struct efct *efct);
int
efct_xport_attach(struct efct_xport *xport);
int
efct_xport_initialize(struct efct_xport *xport);
void
efct_xport_detach(struct efct_xport *xport);
int
efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
int
efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
union efct_xport_stats_u *result);
void
efct_xport_free(struct efct_xport *xport);
struct scsi_transport_template *efct_attach_fc_transport(void);
struct scsi_transport_template *efct_attach_vport_fc_transport(void);
void
efct_release_fc_transport(struct scsi_transport_template *transport_template);
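/*
 * Usage sketch, not part of the original patch: scalar status queries
 * such as EFCT_XPORT_LINK_SPEED fill only the .value member of
 * union efct_xport_stats_u. The caller below is hypothetical.
 */
static inline u32 efct_xport_example_link_speed(struct efct_xport *xport)
{
    union efct_xport_stats_u result;

    if (efct_xport_status(xport, EFCT_XPORT_LINK_SPEED, &result))
        return 0;

    return result.value;
}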
#endif /* __EFCT_XPORT_H__ */