mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-28 06:34:12 +08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (45 commits)
  [SCSI] qla2xxx: Update version number to 8.03.00-k1.
  [SCSI] qla2xxx: Add ISP81XX support.
  [SCSI] qla2xxx: Use proper request/response queues with MQ instantiations.
  [SCSI] qla2xxx: Correct MQ-chain information retrieval during a firmware dump.
  [SCSI] qla2xxx: Collapse EFT/FCE copy procedures during a firmware dump.
  [SCSI] qla2xxx: Don't pollute kernel logs with ZIO/RIO status messages.
  [SCSI] qla2xxx: Don't fallback to interrupt-polling during re-initialization with MSI-X enabled.
  [SCSI] qla2xxx: Remove support for reading/writing HW-event-log.
  [SCSI] cxgb3i: add missing include
  [SCSI] scsi_lib: fix DID_RESET status problems
  [SCSI] fc transport: restore missing dev_loss_tmo callback to LLDD
  [SCSI] aha152x_cs: Fix regression that keeps driver from using shared interrupts
  [SCSI] sd: Correctly handle 6-byte commands with DIX
  [SCSI] sd: DIF: Fix tagging on platforms with signed char
  [SCSI] sd: DIF: Show app tag on error
  [SCSI] Fix error handling for DIF/DIX
  [SCSI] scsi_lib: don't decrement busy counters when inserting commands
  [SCSI] libsas: fix test for negative unsigned and typos
  [SCSI] a2091, gvp11: kill warn_unused_result warnings
  [SCSI] fusion: Move a dereference below a NULL test
  ...

Fixed up trivial conflict due to moving the async part of sd_probe around in
the async probes vs using dev_set_name() in naming.
This commit is contained in:
commit cd764695b6
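A large share of the driver changes in the diff below are mechanical conversions away from the fixed-size bus_id field (being removed from struct device) to the dev_name() accessor and the dev_set_name() helper. A hedged sketch of the before/after pattern, using a hypothetical driver rather than code from this merge:

#include <linux/kernel.h>
#include <linux/device.h>

/* Hypothetical example of the conversion applied throughout this merge. */
static void example_report(struct device *dev)
{
	/* old style: read the fixed-size field directly
	 *     printk(KERN_INFO "bound to %s\n", dev->bus_id);
	 */
	printk(KERN_INFO "bound to %s\n", dev_name(dev));	/* new style */
}

static void example_name(struct device *dev, int index)
{
	/* old style: snprintf(dev->bus_id, BUS_ID_SIZE, "example%d", index); */
	dev_set_name(dev, "example%d", index);			/* new style */
}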
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
		struct rq_map_data *map_data, void __user *ubuf,
-		unsigned int len, int null_mapped, gfp_t gfp_mask)
+		unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;

@@ -63,7 +63,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
	if (IS_ERR(bio))
		return PTR_ERR(bio);

-	if (null_mapped)
+	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;

@@ -114,17 +114,15 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
-	int ret, null_mapped = 0;
+	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;
-	if (!ubuf) {
-		if (!map_data || rq_data_dir(rq) != READ)
-			return -EINVAL;
-		null_mapped = 1;
-	}
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
+		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

@@ -143,13 +141,16 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
		map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					null_mapped, gfp_mask);
+					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
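The change above folds the former null_mapped argument into struct rq_map_data. A minimal, hedged sketch of a caller that maps a request backed by driver-owned pages instead of a user buffer; names and field layout follow the 2.6.28/29-era API and are illustrative only:

#include <linux/blkdev.h>

/*
 * Illustrative sketch, not from this merge: map a request whose data comes
 * from driver-owned pages.  The caller now indicates the "no user buffer"
 * case via map_data->null_mapped instead of a separate argument.
 */
static int example_map_driver_pages(struct request_queue *q, struct request *rq,
				    struct page **pages, int nr_pages,
				    unsigned long len)
{
	struct rq_map_data map_data = {
		.pages       = pages,		/* pre-allocated backing pages */
		.page_order  = 0,
		.nr_entries  = nr_pages,
		.offset      = 0,
		.null_mapped = 1,		/* ubuf below is NULL on purpose */
	};

	/* A NULL user buffer is accepted when null_mapped is set. */
	return blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_KERNEL);
}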
@@ -308,10 +308,11 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
{
	int rc = 1;

-	dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
-		ioctl->ioc->name, ioctl->ioc->id));
	if (ioctl == NULL)
		return;
+	dctlprintk(ioctl->ioc,
+		printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
+		ioctl->ioc->name, ioctl->ioc->id));

	ioctl->wait_done = 0;
	if (ioctl->reset & MPTCTL_RESET_OK)
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
	edev->edev.class = &enclosure_class;
	edev->edev.parent = get_device(dev);
	edev->cb = cb;
-	snprintf(edev->edev.bus_id, BUS_ID_SIZE, "%s", name);
+	dev_set_name(&edev->edev, name);
	err = device_register(&edev->edev);
	if (err)
		goto err;

@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(enclosure_unregister);
static void enclosure_link_name(struct enclosure_component *cdev, char *name)
{
	strcpy(name, "enclosure_device:");
-	strcat(name, cdev->cdev.bus_id);
+	strcat(name, dev_name(&cdev->cdev));
}

static void enclosure_remove_links(struct enclosure_component *cdev)

@@ -256,9 +256,9 @@ enclosure_component_register(struct enclosure_device *edev,
	cdev = &ecomp->cdev;
	cdev->parent = get_device(&edev->edev);
	if (name)
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
+		dev_set_name(cdev, name);
	else
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
+		dev_set_name(cdev, "%u", number);

	cdev->release = enclosure_component_release;
	cdev->groups = enclosure_groups;
@@ -318,7 +318,7 @@ NCR_D700_probe(struct device *dev)
		return -ENOMEM;

	p->dev = dev;
-	snprintf(p->name, sizeof(p->name), "D700(%s)", dev->bus_id);
+	snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
	if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
		printk(KERN_ERR "D700: request_irq failed\n");
		kfree(p);
@@ -169,10 +169,8 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
			continue;

		instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
-		if (instance == NULL) {
-			release_mem_region(address, 256);
-			continue;
-		}
+		if (instance == NULL)
+			goto release;
		instance->base = ZTWO_VADDR(address);
		instance->irq = IRQ_AMIGA_PORTS;
		instance->unique_id = z->slotaddr;

@@ -183,10 +181,18 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
		HDATA(instance)->fast = 0;
		HDATA(instance)->dma_mode = CTRL_DMA;
		wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-		request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
-			instance);
+		if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
+				instance))
+			goto unregister;
		DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
		num_a2091++;
		continue;
+
+unregister:
+		scsi_unregister(instance);
+		wd33c93_release();
+release:
+		release_mem_region(address, 256);
	}

	return num_a2091;
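request_irq() is marked __must_check, so the a2091 probe above (and the gvp11 probe further down) now tests its return value and unwinds through goto labels instead of ignoring a possible failure. A hedged, self-contained sketch of the same pattern with hypothetical names:

#include <linux/interrupt.h>
#include <linux/ioport.h>

/* Hypothetical handler and resources; the shape of the unwind is the point. */
static irqreturn_t example_intr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_attach(int irq, unsigned long address, void *cookie)
{
	if (!request_mem_region(address, 256, "example"))
		return -EBUSY;

	if (request_irq(irq, example_intr, IRQF_SHARED, "EXAMPLE SCSI", cookie))
		goto release;		/* undo only what succeeded so far */

	return 0;

release:
	release_mem_region(address, 256);
	return -EBUSY;
}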
@@ -2527,7 +2527,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
{
	struct asc_board *boardp = shost_priv(s);

-	printk("Scsi_Host at addr 0x%p, device %s\n", s, boardp->dev->bus_id);
+	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
	printk(" host_busy %u, host_no %d, last_reset %d,\n",
		s->host_busy, s->host_no, (unsigned)s->last_reset);
@@ -189,7 +189,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
-		reset_type ? "hard" : "soft", phy->dev.bus_id);
+		reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE) {
		/* wait for the maximum settle time */
@@ -11,6 +11,7 @@
 */

#include <linux/skbuff.h>
+#include <linux/scatterlist.h>

/* from cxgb3 LLD */
#include "common.h"
@@ -329,12 +329,16 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
				(epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
							: WD33C93_FS_12_15);

-		request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
-			instance);
+		if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
+				instance))
+			goto unregister;
		DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
		num_gvp11++;
		continue;
+
+unregister:
+		scsi_unregister(instance);
+		wd33c93_release();
+release:
+		release_mem_region(address, 256);
	}
@@ -388,8 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
	shost->dma_boundary = 0xffffffff;

	device_initialize(&shost->shost_gendev);
-	snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
-		shost->host_no);
+	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
#ifndef CONFIG_SYSFS_DEPRECATED
	shost->shost_gendev.bus = &scsi_bus_type;
#endif

@@ -398,8 +397,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
	device_initialize(&shost->shost_dev);
	shost->shost_dev.parent = &shost->shost_gendev;
	shost->shost_dev.class = &shost_class;
-	snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
-		shost->host_no);
+	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
	shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;

	shost->ehandler = kthread_run(scsi_error_handler, shost,
@@ -101,7 +101,7 @@ static const struct {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
-	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },

@@ -115,11 +115,11 @@ static const struct {

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

@@ -1145,10 +1145,10 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
	login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
-		vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
+		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	location = of_get_property(of_node, "ibm,loc-code", NULL);
-	location = location ? location : vhost->dev->bus_id;
+	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
@@ -89,6 +89,7 @@ static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;

static struct scsi_transport_template *ibmvscsi_transport_template;

@@ -1633,7 +1634,7 @@ static struct scsi_host_template driver_template = {
static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
{
	/* iu_storage data allocated in initialize_event_pool */
-	unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
+	unsigned long desired_io = max_events * sizeof(union viosrp_iu);

	/* add io space for sg data */
	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *

@@ -1657,7 +1658,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)

	vdev->dev.driver_data = NULL;

-	driver_template.can_queue = max_requests - 2;
	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");

@@ -1673,12 +1673,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

-	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto init_crq_failed;
	}
-	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
+	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

@@ -1730,7 +1730,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
init_pool_failed:
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_requests);
+	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
init_crq_failed:
	scsi_host_put(host);
scsi_host_alloc_failed:

@@ -1742,7 +1742,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
-		max_requests);
+		max_events);

	srp_remove_host(hostdata->host);
	scsi_remove_host(hostdata->host);

@@ -1779,6 +1779,10 @@ int __init ibmvscsi_module_init(void)
{
	int ret;

+	/* Ensure we have two requests to do error recovery */
+	driver_template.can_queue = max_requests;
+	max_events = max_requests + 2;
+
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		ibmvscsi_ops = &iseriesvscsi_ops;
	else if (firmware_has_feature(FW_FEATURE_VIO))
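The ibmvscsi hunks above change the sizing rule: the SCSI midlayer may queue max_requests commands, while the driver sizes its CRQ and event pool for max_events = max_requests + 2 so error-recovery commands (abort, reset) always find a free event. A hedged, purely illustrative sketch of that sizing, with hypothetical types:

#include <stdlib.h>

struct example_event_pool {
	int size;
	void **events;
};

static int example_pool_init(struct example_event_pool *pool, int max_requests)
{
	int max_events = max_requests + 2;	/* two events reserved for error handling */

	pool->events = calloc(max_events, sizeof(*pool->events));
	if (!pool->events)
		return -1;
	pool->size = max_events;
	return 0;
}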
@@ -2184,7 +2184,7 @@ static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
-	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
+	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}

@@ -1272,7 +1272,7 @@ struct ipr_dump_entry_header {

struct ipr_dump_location_entry {
	struct ipr_dump_entry_header hdr;
-	u8 location[BUS_ID_SIZE];
+	u8 location[20];
}__attribute__((packed));

struct ipr_dump_trace_entry {
@@ -103,8 +103,7 @@ lasi700_probe(struct parisc_device *dev)

	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
	if (!hostdata) {
-		printk(KERN_ERR "%s: Failed to allocate host data\n",
-			dev->dev.bus_id);
+		dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
		return -ENOMEM;
	}
@@ -169,7 +169,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
	if (res) {
		printk("sas: driver on pcidev %s cannot handle "
			"device %llx, error:%d\n",
-			sas_ha->dev->bus_id,
+			dev_name(sas_ha->dev),
			SAS_ADDR(dev->sas_addr), res);
	}
}
@@ -56,7 +56,7 @@ void sas_dprint_phye(int phyid, enum phy_event pe)

void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
{
-	SAS_DPRINTK("ha %s: %s event\n", sas_ha->dev->bus_id,
+	SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
		sas_hae_str[he]);
}
@@ -199,8 +199,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
		break;

	case SMP_DISCOVER:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
			req->data_len = 0;
			error = -EINVAL;
			goto out;

@@ -215,8 +215,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
		break;

	case SMP_REPORT_PHY_SATA:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
			req->data_len = 0;
			error = -EINVAL;
			goto out;

@@ -238,8 +238,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
		break;

	case SMP_PHY_CONTROL:
-		req->data_len =- 44;
-		if (req->data_len < 0) {
+		req->data_len -= 44;
+		if ((int)req->data_len < 0) {
			req->data_len = 0;
			error = -EINVAL;
			goto out;
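The three hunks above fix the same pair of bugs: "req->data_len =- 16;" parses as an assignment of -16 rather than a subtraction, and because data_len is unsigned the "< 0" check could never fire. A small user-space illustration of both effects (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int data_len = 8;

	data_len =- 16;		/* typo: parsed as data_len = -16 */
	printf("after '=-': %u\n", data_len);

	data_len = 8;
	data_len -= 16;		/* intended subtraction; wraps for unsigned */
	printf("unsigned < 0: %d\n", data_len < 0);		/* always 0 */
	printf("(int) cast < 0: %d\n", (int)data_len < 0);	/* 1: catches the underflow */
	return 0;
}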
@@ -113,7 +113,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
	sas_port_add_phy(port->port, phy->phy);

	SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
-		phy->phy->dev.bus_id,port->port->dev.bus_id,
+		dev_name(&phy->phy->dev), dev_name(&port->port->dev),
		port->phy_mask,
		SAS_ADDR(port->attached_sas_addr));
@@ -1795,12 +1795,13 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
int
lpfc_online(struct lpfc_hba *phba)
{
-	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
+	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;
@@ -2959,7 +2959,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-	msleep(100);
+	msleep(1100);
	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
@@ -114,7 +114,7 @@ static int aha152x_probe(struct pcmcia_device *link)
	link->io.NumPorts1 = 0x20;
	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
	link->io.IOAddrLines = 10;
-	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+	link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
	link->irq.IRQInfo1 = IRQ_LEVEL_ID;
	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;
@@ -303,7 +303,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
		else if (start == (ha->flt_region_boot * 4) ||
			start == (ha->flt_region_fw * 4))
			valid = 1;
-		else if (IS_QLA25XX(ha) &&
+		else if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) &&
			start == (ha->flt_region_vpd_nvram * 4))
			valid = 1;
		if (!valid) {

@@ -815,6 +815,21 @@ qla2x00_total_isp_aborts_show(struct device *dev,
		ha->qla_stats.total_isp_aborts);
}

+static ssize_t
+qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha))
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x (%x)\n",
+		ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
+		ha->mpi_version[3], ha->mpi_capabilities);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);

@@ -839,6 +854,7 @@ static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
		NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
		NULL);
+static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,

@@ -858,6 +874,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_total_isp_aborts,
+	&dev_attr_mpi_version,
	NULL,
};

@@ -892,6 +909,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
+	case PORT_SPEED_10GB:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
	}
	fc_host_speed(shost) = speed;
}

@@ -1382,7 +1402,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

-	if (IS_QLA25XX(ha))
+	if (IS_QLA81XX(ha))
+		speed = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA25XX(ha))
		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
			FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
	else if (IS_QLA24XX_TYPE(ha))
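The new mpi_version attribute above is registered through qla2x00_host_attrs, so it appears as a read-only file in the host's sysfs directory. A hedged user-space sketch of reading it; the path and sample value are illustrative, not taken from this merge:

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* shost attributes show up under /sys/class/scsi_host/hostN/ */
	FILE *f = fopen("/sys/class/scsi_host/host0/mpi_version", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("MPI firmware: %s", buf);	/* e.g. "01.27.03.00 (5)" */
	fclose(f);
	return 0;
}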
@ -310,6 +310,76 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
|
||||
*buf++ = htons(RD_REG_WORD(dmp_reg++));
|
||||
}
|
||||
|
||||
static inline void *
|
||||
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
|
||||
{
|
||||
if (!ha->eft)
|
||||
return ptr;
|
||||
|
||||
memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
|
||||
return ptr + ntohl(ha->fw_dump->eft_size);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
{
|
||||
uint32_t cnt;
|
||||
uint32_t *iter_reg;
|
||||
struct qla2xxx_fce_chain *fcec = ptr;
|
||||
|
||||
if (!ha->fce)
|
||||
return ptr;
|
||||
|
||||
*last_chain = &fcec->type;
|
||||
fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
|
||||
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
|
||||
fce_calc_size(ha->fce_bufs));
|
||||
fcec->size = htonl(fce_calc_size(ha->fce_bufs));
|
||||
fcec->addr_l = htonl(LSD(ha->fce_dma));
|
||||
fcec->addr_h = htonl(MSD(ha->fce_dma));
|
||||
|
||||
iter_reg = fcec->eregs;
|
||||
for (cnt = 0; cnt < 8; cnt++)
|
||||
*iter_reg++ = htonl(ha->fce_mb[cnt]);
|
||||
|
||||
memcpy(iter_reg, ha->fce, ntohl(fcec->size));
|
||||
|
||||
return iter_reg;
|
||||
}
|
||||
|
||||
static inline void *
|
||||
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
{
|
||||
uint32_t cnt, que_idx;
|
||||
uint8_t req_cnt, rsp_cnt, que_cnt;
|
||||
struct qla2xxx_mq_chain *mq = ptr;
|
||||
struct device_reg_25xxmq __iomem *reg;
|
||||
|
||||
if (!ha->mqenable)
|
||||
return ptr;
|
||||
|
||||
mq = ptr;
|
||||
*last_chain = &mq->type;
|
||||
mq->type = __constant_htonl(DUMP_CHAIN_MQ);
|
||||
mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
|
||||
|
||||
req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
|
||||
rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
|
||||
que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
|
||||
mq->count = htonl(que_cnt);
|
||||
for (cnt = 0; cnt < que_cnt; cnt++) {
|
||||
reg = (struct device_reg_25xxmq *) ((void *)
|
||||
ha->mqiobase + cnt * QLA_QUE_PAGE);
|
||||
que_idx = cnt * 4;
|
||||
mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in));
|
||||
mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out));
|
||||
mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(®->rsp_q_in));
|
||||
mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(®->rsp_q_out));
|
||||
}
|
||||
|
||||
return ptr + sizeof(struct qla2xxx_mq_chain);
|
||||
}
|
||||
|
||||
/**
|
||||
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
|
||||
* @ha: HA context
|
||||
@ -913,8 +983,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
goto qla24xx_fw_dump_failed_0;
|
||||
|
||||
nxt = qla2xxx_copy_queues(ha, nxt);
|
||||
if (ha->eft)
|
||||
memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
|
||||
|
||||
qla24xx_copy_eft(ha, nxt);
|
||||
|
||||
qla24xx_fw_dump_failed_0:
|
||||
if (rval != QLA_SUCCESS) {
|
||||
@ -942,19 +1012,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
uint32_t risc_address;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
struct device_reg_25xxmq __iomem *reg25;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
uint32_t *iter_reg;
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla25xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt;
|
||||
struct qla2xxx_fce_chain *fcec;
|
||||
struct qla2xxx_mq_chain *mq = NULL;
|
||||
uint32_t qreg_size;
|
||||
uint8_t req_cnt, rsp_cnt, que_cnt;
|
||||
uint32_t que_idx;
|
||||
void *nxt, *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
@ -1001,28 +1066,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
|
||||
fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window));
|
||||
|
||||
/* Multi queue registers */
|
||||
if (ha->mqenable) {
|
||||
qreg_size = sizeof(struct qla2xxx_mq_chain);
|
||||
mq = kzalloc(qreg_size, GFP_KERNEL);
|
||||
if (!mq)
|
||||
goto qla25xx_fw_dump_failed_0;
|
||||
req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
|
||||
rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
|
||||
que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
|
||||
mq->count = htonl(que_cnt);
|
||||
mq->chain_size = htonl(qreg_size);
|
||||
mq->type = __constant_htonl(DUMP_CHAIN_MQ);
|
||||
for (cnt = 0; cnt < que_cnt; cnt++) {
|
||||
reg25 = (struct device_reg_25xxmq *) ((void *)
|
||||
ha->mqiobase + cnt * QLA_QUE_PAGE);
|
||||
que_idx = cnt * 4;
|
||||
mq->qregs[que_idx] = htonl(reg25->req_q_in);
|
||||
mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
|
||||
mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
|
||||
mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
|
||||
}
|
||||
}
|
||||
WRT_REG_DWORD(®->iobase_window, 0x00);
|
||||
RD_REG_DWORD(®->iobase_window);
|
||||
|
||||
@ -1240,6 +1283,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
|
||||
|
||||
/* Multi queue registers */
|
||||
nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
|
||||
&last_chain);
|
||||
|
||||
rval = qla24xx_soft_reset(ha);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla25xx_fw_dump_failed_0;
|
||||
@ -1249,37 +1296,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla25xx_fw_dump_failed_0;
|
||||
|
||||
/* Fibre Channel Trace Buffer. */
|
||||
nxt = qla2xxx_copy_queues(ha, nxt);
|
||||
if (ha->eft)
|
||||
memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
|
||||
|
||||
/* Fibre Channel Event Buffer. */
|
||||
if (!ha->fce)
|
||||
goto qla25xx_fw_dump_failed_0;
|
||||
nxt = qla24xx_copy_eft(ha, nxt);
|
||||
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
|
||||
if (ha->mqenable) {
|
||||
nxt = nxt + ntohl(ha->fw_dump->eft_size);
|
||||
memcpy(nxt, mq, qreg_size);
|
||||
kfree(mq);
|
||||
fcec = nxt + qreg_size;
|
||||
} else {
|
||||
fcec = nxt + ntohl(ha->fw_dump->eft_size);
|
||||
/* Chain entries -- started with MQ. */
|
||||
qla25xx_copy_fce(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
|
||||
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
|
||||
fce_calc_size(ha->fce_bufs));
|
||||
fcec->size = htonl(fce_calc_size(ha->fce_bufs));
|
||||
fcec->addr_l = htonl(LSD(ha->fce_dma));
|
||||
fcec->addr_h = htonl(MSD(ha->fce_dma));
|
||||
|
||||
iter_reg = fcec->eregs;
|
||||
for (cnt = 0; cnt < 8; cnt++)
|
||||
*iter_reg++ = htonl(ha->fce_mb[cnt]);
|
||||
|
||||
memcpy(iter_reg, ha->fce, ntohl(fcec->size));
|
||||
|
||||
qla25xx_fw_dump_failed_0:
|
||||
if (rval != QLA_SUCCESS) {
|
||||
@ -1298,6 +1324,330 @@ qla25xx_fw_dump_failed:
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
}
|
||||
|
||||
void
|
||||
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt;
|
||||
uint32_t risc_address;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
uint32_t *iter_reg;
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla81xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt, *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"No buffer available for dump!!!\n");
|
||||
goto qla81xx_fw_dump_failed;
|
||||
}
|
||||
|
||||
if (ha->fw_dumped) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Firmware has been previously dumped (%p) -- ignoring "
|
||||
"request...\n", ha->fw_dump);
|
||||
goto qla81xx_fw_dump_failed;
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp81;
|
||||
qla2xxx_prep_dump(ha, ha->fw_dump);
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
/* Pause RISC. */
|
||||
rval = qla24xx_pause_risc(reg);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla81xx_fw_dump_failed_0;
|
||||
|
||||
/* Host/Risc registers. */
|
||||
iter_reg = fw->host_risc_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
|
||||
|
||||
/* PCIe registers. */
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x7C00);
|
||||
RD_REG_DWORD(®->iobase_addr);
|
||||
WRT_REG_DWORD(®->iobase_window, 0x01);
|
||||
dmp_reg = ®->iobase_c4;
|
||||
fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
|
||||
fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_window, 0x00);
|
||||
RD_REG_DWORD(®->iobase_window);
|
||||
|
||||
/* Host interface registers. */
|
||||
dmp_reg = ®->flash_addr;
|
||||
for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
|
||||
fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
|
||||
/* Disable interrupts. */
|
||||
WRT_REG_DWORD(®->ictrl, 0);
|
||||
RD_REG_DWORD(®->ictrl);
|
||||
|
||||
/* Shadow registers. */
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x0F70);
|
||||
RD_REG_DWORD(®->iobase_addr);
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0000000);
|
||||
fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0100000);
|
||||
fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0200000);
|
||||
fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0300000);
|
||||
fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0400000);
|
||||
fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0500000);
|
||||
fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0600000);
|
||||
fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0700000);
|
||||
fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0800000);
|
||||
fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0900000);
|
||||
fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
WRT_REG_DWORD(®->iobase_select, 0xB0A00000);
|
||||
fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata));
|
||||
|
||||
/* RISC I/O register. */
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x0010);
|
||||
fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window));
|
||||
|
||||
/* Mailbox registers. */
|
||||
mbx_reg = ®->mailbox0;
|
||||
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
|
||||
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
|
||||
|
||||
/* Transfer sequence registers. */
|
||||
iter_reg = fw->xseq_gp_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->xseq_0_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
|
||||
|
||||
qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
|
||||
|
||||
/* Receive sequence registers. */
|
||||
iter_reg = fw->rseq_gp_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->rseq_0_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
|
||||
|
||||
qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
|
||||
qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
|
||||
|
||||
/* Auxiliary sequence registers. */
|
||||
iter_reg = fw->aseq_gp_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xB070, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->aseq_0_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
|
||||
|
||||
qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
|
||||
qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
|
||||
|
||||
/* Command DMA registers. */
|
||||
qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
|
||||
|
||||
/* Queues. */
|
||||
iter_reg = fw->req0_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
|
||||
dmp_reg = ®->iobase_q;
|
||||
for (cnt = 0; cnt < 7; cnt++)
|
||||
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
|
||||
iter_reg = fw->resp0_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
|
||||
dmp_reg = ®->iobase_q;
|
||||
for (cnt = 0; cnt < 7; cnt++)
|
||||
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
|
||||
iter_reg = fw->req1_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
|
||||
dmp_reg = ®->iobase_q;
|
||||
for (cnt = 0; cnt < 7; cnt++)
|
||||
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
|
||||
|
||||
/* Transmit DMA registers. */
|
||||
iter_reg = fw->xmt0_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7610, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->xmt1_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7630, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->xmt2_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7650, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->xmt3_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7670, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->xmt4_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7690, 16, iter_reg);
|
||||
|
||||
qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
|
||||
|
||||
/* Receive DMA registers. */
|
||||
iter_reg = fw->rcvt0_data_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7710, 16, iter_reg);
|
||||
|
||||
iter_reg = fw->rcvt1_data_dma_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x7730, 16, iter_reg);
|
||||
|
||||
/* RISC registers. */
|
||||
iter_reg = fw->risc_gp_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
|
||||
|
||||
/* Local memory controller registers. */
|
||||
iter_reg = fw->lmc_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x3070, 16, iter_reg);
|
||||
|
||||
/* Fibre Protocol Module registers. */
|
||||
iter_reg = fw->fpm_hdw_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
|
||||
|
||||
/* Frame Buffer registers. */
|
||||
iter_reg = fw->fb_hdw_reg;
|
||||
iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
|
||||
iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
|
||||
qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
|
||||
|
||||
/* Multi queue registers */
|
||||
nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
|
||||
&last_chain);
|
||||
|
||||
rval = qla24xx_soft_reset(ha);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla81xx_fw_dump_failed_0;
|
||||
|
||||
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
|
||||
&nxt);
|
||||
if (rval != QLA_SUCCESS)
|
||||
goto qla81xx_fw_dump_failed_0;
|
||||
|
||||
nxt = qla2xxx_copy_queues(ha, nxt);
|
||||
|
||||
nxt = qla24xx_copy_eft(ha, nxt);
|
||||
|
||||
/* Chain entries -- started with MQ. */
|
||||
qla25xx_copy_fce(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
|
||||
qla81xx_fw_dump_failed_0:
|
||||
if (rval != QLA_SUCCESS) {
|
||||
qla_printk(KERN_WARNING, ha,
|
||||
"Failed to dump firmware (%x)!!!\n", rval);
|
||||
ha->fw_dumped = 0;
|
||||
|
||||
} else {
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"Firmware dump saved to temp buffer (%ld/%p).\n",
|
||||
base_vha->host_no, ha->fw_dump);
|
||||
ha->fw_dumped = 1;
|
||||
}
|
||||
|
||||
qla81xx_fw_dump_failed:
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
/* Driver Debug Functions. */
|
||||
/****************************************************************************/
|
||||
|
@ -251,6 +251,45 @@ struct qla25xx_fw_dump {
|
||||
uint32_t ext_mem[1];
|
||||
};
|
||||
|
||||
struct qla81xx_fw_dump {
|
||||
uint32_t host_status;
|
||||
uint32_t host_risc_reg[32];
|
||||
uint32_t pcie_regs[4];
|
||||
uint32_t host_reg[32];
|
||||
uint32_t shadow_reg[11];
|
||||
uint32_t risc_io_reg;
|
||||
uint16_t mailbox_reg[32];
|
||||
uint32_t xseq_gp_reg[128];
|
||||
uint32_t xseq_0_reg[48];
|
||||
uint32_t xseq_1_reg[16];
|
||||
uint32_t rseq_gp_reg[128];
|
||||
uint32_t rseq_0_reg[32];
|
||||
uint32_t rseq_1_reg[16];
|
||||
uint32_t rseq_2_reg[16];
|
||||
uint32_t aseq_gp_reg[128];
|
||||
uint32_t aseq_0_reg[32];
|
||||
uint32_t aseq_1_reg[16];
|
||||
uint32_t aseq_2_reg[16];
|
||||
uint32_t cmd_dma_reg[16];
|
||||
uint32_t req0_dma_reg[15];
|
||||
uint32_t resp0_dma_reg[15];
|
||||
uint32_t req1_dma_reg[15];
|
||||
uint32_t xmt0_dma_reg[32];
|
||||
uint32_t xmt1_dma_reg[32];
|
||||
uint32_t xmt2_dma_reg[32];
|
||||
uint32_t xmt3_dma_reg[32];
|
||||
uint32_t xmt4_dma_reg[32];
|
||||
uint32_t xmt_data_dma_reg[16];
|
||||
uint32_t rcvt0_data_dma_reg[32];
|
||||
uint32_t rcvt1_data_dma_reg[32];
|
||||
uint32_t risc_gp_reg[128];
|
||||
uint32_t lmc_reg[128];
|
||||
uint32_t fpm_hdw_reg[224];
|
||||
uint32_t fb_hdw_reg[208];
|
||||
uint32_t code_ram[0x2000];
|
||||
uint32_t ext_mem[1];
|
||||
};
|
||||
|
||||
#define EFT_NUM_BUFFERS 4
|
||||
#define EFT_BYTES_PER_BUFFER 0x4000
|
||||
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
|
||||
@ -313,5 +352,6 @@ struct qla2xxx_fw_dump {
|
||||
struct qla2300_fw_dump isp23;
|
||||
struct qla24xx_fw_dump isp24;
|
||||
struct qla25xx_fw_dump isp25;
|
||||
struct qla81xx_fw_dump isp81;
|
||||
} isp;
|
||||
};
|
||||
|
@ -187,7 +187,6 @@ struct req_que;
|
||||
* SCSI Request Block
|
||||
*/
|
||||
typedef struct srb {
|
||||
struct scsi_qla_host *vha; /* HA the SP is queued on */
|
||||
struct req_que *que;
|
||||
struct fc_port *fcport;
|
||||
|
||||
@ -2136,7 +2135,6 @@ struct qla_msix_entry {
|
||||
/* Work events. */
|
||||
enum qla_work_type {
|
||||
QLA_EVT_AEN,
|
||||
QLA_EVT_HWE_LOG,
|
||||
};
|
||||
|
||||
|
||||
@ -2151,10 +2149,6 @@ struct qla_work_evt {
|
||||
enum fc_host_event_code code;
|
||||
u32 data;
|
||||
} aen;
|
||||
struct {
|
||||
uint16_t code;
|
||||
uint16_t d1, d2, d3;
|
||||
} hwe;
|
||||
} u;
|
||||
};
|
||||
|
||||
@ -2309,6 +2303,7 @@ struct qla_hw_data {
|
||||
#define PORT_SPEED_2GB 0x01
|
||||
#define PORT_SPEED_4GB 0x03
|
||||
#define PORT_SPEED_8GB 0x04
|
||||
#define PORT_SPEED_10GB 0x13
|
||||
uint16_t link_data_rate; /* F/W operating speed */
|
||||
|
||||
uint8_t current_topology;
|
||||
@ -2328,6 +2323,7 @@ struct qla_hw_data {
|
||||
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
|
||||
uint32_t device_type;
|
||||
#define DT_ISP2100 BIT_0
|
||||
#define DT_ISP2200 BIT_1
|
||||
@ -2342,7 +2338,8 @@ struct qla_hw_data {
|
||||
#define DT_ISP5432 BIT_10
|
||||
#define DT_ISP2532 BIT_11
|
||||
#define DT_ISP8432 BIT_12
|
||||
#define DT_ISP_LAST (DT_ISP8432 << 1)
|
||||
#define DT_ISP8001 BIT_13
|
||||
#define DT_ISP_LAST (DT_ISP8001 << 1)
|
||||
|
||||
#define DT_IIDMA BIT_26
|
||||
#define DT_FWI2 BIT_27
|
||||
@ -2364,6 +2361,7 @@ struct qla_hw_data {
|
||||
#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
|
||||
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
|
||||
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
|
||||
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
|
||||
|
||||
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
|
||||
IS_QLA6312(ha) || IS_QLA6322(ha))
|
||||
@ -2373,8 +2371,11 @@ struct qla_hw_data {
|
||||
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
|
||||
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
|
||||
IS_QLA84XX(ha))
|
||||
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
|
||||
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
|
||||
IS_QLA25XX(ha))
|
||||
IS_QLA25XX(ha) || IS_QLA81XX(ha))
|
||||
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
|
||||
(ha)->flags.msix_enabled)
|
||||
|
||||
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
|
||||
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
|
||||
@ -2472,6 +2473,9 @@ struct qla_hw_data {
|
||||
uint8_t fw_seriallink_options[4];
|
||||
uint16_t fw_seriallink_options24[4];
|
||||
|
||||
uint8_t mpi_version[4];
|
||||
uint32_t mpi_capabilities;
|
||||
|
||||
/* Firmware dump information. */
|
||||
struct qla2xxx_fw_dump *fw_dump;
|
||||
uint32_t fw_dump_len;
|
||||
@ -2480,6 +2484,7 @@ struct qla_hw_data {
|
||||
dma_addr_t eft_dma;
|
||||
void *eft;
|
||||
|
||||
uint32_t chain_offset;
|
||||
struct dentry *dfs_dir;
|
||||
struct dentry *dfs_fce;
|
||||
dma_addr_t fce_dma;
|
||||
@ -2489,10 +2494,6 @@ struct qla_hw_data {
|
||||
uint64_t fce_wr, fce_rd;
|
||||
struct mutex fce_mutex;
|
||||
|
||||
uint32_t hw_event_start;
|
||||
uint32_t hw_event_ptr;
|
||||
uint32_t hw_event_pause_errors;
|
||||
|
||||
uint32_t pci_attr;
|
||||
uint16_t chip_revision;
|
||||
|
||||
@ -2522,6 +2523,12 @@ struct qla_hw_data {
|
||||
uint8_t fcode_revision[16];
|
||||
uint32_t fw_revision[4];
|
||||
|
||||
/* Offsets for flash/nvram access (set to ~0 if not used). */
|
||||
uint32_t flash_conf_off;
|
||||
uint32_t flash_data_off;
|
||||
uint32_t nvram_conf_off;
|
||||
uint32_t nvram_data_off;
|
||||
|
||||
uint32_t fdt_wrt_disable;
|
||||
uint32_t fdt_erase_cmd;
|
||||
uint32_t fdt_block_size;
|
||||
@ -2533,7 +2540,6 @@ struct qla_hw_data {
|
||||
uint32_t flt_region_boot;
|
||||
uint32_t flt_region_fw;
|
||||
uint32_t flt_region_vpd_nvram;
|
||||
uint32_t flt_region_hw_event;
|
||||
uint32_t flt_region_npiv_conf;
|
||||
|
||||
/* Needed for BEACON */
|
||||
@ -2737,6 +2743,7 @@ typedef struct scsi_qla_host {
|
||||
#define OPTROM_SIZE_2322 0x100000
|
||||
#define OPTROM_SIZE_24XX 0x100000
|
||||
#define OPTROM_SIZE_25XX 0x200000
|
||||
#define OPTROM_SIZE_81XX 0x400000
|
||||
|
||||
#include "qla_gbl.h"
|
||||
#include "qla_dbg.h"
|
||||
|
@@ -113,7 +113,8 @@ int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
-	if (!IS_QLA25XX(ha))
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

@@ -1215,9 +1215,10 @@ struct qla_fdt_layout {

struct qla_flt_location {
	uint8_t sig[4];
-	uint32_t start_lo;
-	uint32_t start_hi;
-	uint16_t unused;
+	uint16_t start_lo;
+	uint16_t start_hi;
+	uint8_t version;
+	uint8_t unused[5];
	uint16_t checksum;
};
@ -1390,4 +1391,291 @@ struct access_chip_rsp_84xx {
|
||||
|
||||
uint32_t reserved[12];
|
||||
};
|
||||
|
||||
/* 81XX Support **************************************************************/
|
||||
|
||||
#define MBA_DCBX_START 0x8016
|
||||
#define MBA_DCBX_COMPLETE 0x8030
|
||||
#define MBA_FCF_CONF_ERR 0x8031
|
||||
#define MBA_DCBX_PARAM_UPDATE 0x8032
|
||||
#define MBA_IDC_COMPLETE 0x8100
|
||||
#define MBA_IDC_NOTIFY 0x8101
|
||||
#define MBA_IDC_TIME_EXT 0x8102
|
||||
|
||||
struct nvram_81xx {
|
||||
/* NVRAM header. */
|
||||
uint8_t id[4];
|
||||
uint16_t nvram_version;
|
||||
uint16_t reserved_0;
|
||||
|
||||
/* Firmware Initialization Control Block. */
|
||||
uint16_t version;
|
||||
uint16_t reserved_1;
|
||||
uint16_t frame_payload_size;
|
||||
uint16_t execution_throttle;
|
||||
uint16_t exchange_count;
|
||||
uint16_t reserved_2;
|
||||
|
||||
uint8_t port_name[WWN_SIZE];
|
||||
uint8_t node_name[WWN_SIZE];
|
||||
|
||||
uint16_t login_retry_count;
|
||||
uint16_t reserved_3;
|
||||
uint16_t interrupt_delay_timer;
|
||||
uint16_t login_timeout;
|
||||
|
||||
uint32_t firmware_options_1;
|
||||
uint32_t firmware_options_2;
|
||||
uint32_t firmware_options_3;
|
||||
|
||||
uint16_t reserved_4[4];
|
||||
|
||||
/* Offset 64. */
|
||||
uint8_t enode_mac[6];
|
||||
uint16_t reserved_5[5];
|
||||
|
||||
/* Offset 80. */
|
||||
uint16_t reserved_6[24];
|
||||
|
||||
/* Offset 128. */
|
||||
uint16_t reserved_7[64];
|
||||
|
||||
/*
|
||||
* BIT 0 = Enable spinup delay
|
||||
* BIT 1 = Disable BIOS
|
||||
* BIT 2 = Enable Memory Map BIOS
|
||||
* BIT 3 = Enable Selectable Boot
|
||||
* BIT 4 = Disable RISC code load
|
||||
* BIT 5 = Disable Serdes
|
||||
* BIT 6 = Opt boot mode
|
||||
* BIT 7 = Interrupt enable
|
||||
*
|
||||
* BIT 8 = EV Control enable
|
||||
* BIT 9 = Enable lip reset
|
||||
* BIT 10 = Enable lip full login
|
||||
* BIT 11 = Enable target reset
|
||||
* BIT 12 = Stop firmware
|
||||
* BIT 13 = Enable nodename option
|
||||
* BIT 14 = Default WWPN valid
|
||||
* BIT 15 = Enable alternate WWN
|
||||
*
|
||||
* BIT 16 = CLP LUN string
|
||||
* BIT 17 = CLP Target string
|
||||
* BIT 18 = CLP BIOS enable string
|
||||
* BIT 19 = CLP Serdes string
|
||||
* BIT 20 = CLP WWPN string
|
||||
* BIT 21 = CLP WWNN string
|
||||
* BIT 22 =
|
||||
* BIT 23 =
|
||||
* BIT 24 = Keep WWPN
|
||||
* BIT 25 = Temp WWPN
|
||||
* BIT 26-31 =
|
||||
*/
|
||||
uint32_t host_p;
|
||||
|
||||
uint8_t alternate_port_name[WWN_SIZE];
|
||||
uint8_t alternate_node_name[WWN_SIZE];
|
||||
|
||||
uint8_t boot_port_name[WWN_SIZE];
|
||||
uint16_t boot_lun_number;
|
||||
uint16_t reserved_8;
|
||||
|
||||
uint8_t alt1_boot_port_name[WWN_SIZE];
|
||||
uint16_t alt1_boot_lun_number;
|
||||
uint16_t reserved_9;
|
||||
|
||||
uint8_t alt2_boot_port_name[WWN_SIZE];
|
||||
uint16_t alt2_boot_lun_number;
|
||||
uint16_t reserved_10;
|
||||
|
||||
uint8_t alt3_boot_port_name[WWN_SIZE];
|
||||
uint16_t alt3_boot_lun_number;
|
||||
uint16_t reserved_11;
|
||||
|
||||
/*
|
||||
* BIT 0 = Selective Login
|
||||
* BIT 1 = Alt-Boot Enable
|
||||
* BIT 2 = Reserved
|
||||
* BIT 3 = Boot Order List
|
||||
* BIT 4 = Reserved
|
||||
* BIT 5 = Selective LUN
|
||||
* BIT 6 = Reserved
|
||||
* BIT 7-31 =
|
||||
*/
|
||||
uint32_t efi_parameters;
|
||||
|
||||
uint8_t reset_delay;
|
||||
uint8_t reserved_12;
|
||||
uint16_t reserved_13;
|
||||
|
||||
uint16_t boot_id_number;
|
||||
uint16_t reserved_14;
|
||||
|
||||
uint16_t max_luns_per_target;
|
||||
uint16_t reserved_15;
|
||||
|
||||
uint16_t port_down_retry_count;
|
||||
uint16_t link_down_timeout;
|
||||
|
||||
/* FCode parameters. */
|
||||
uint16_t fcode_parameter;
|
||||
|
||||
uint16_t reserved_16[3];
|
||||
|
||||
/* Offset 352. */
|
||||
uint8_t reserved_17[4];
|
||||
uint16_t reserved_18[5];
|
||||
uint8_t reserved_19[2];
|
||||
uint16_t reserved_20[8];
|
||||
|
||||
/* Offset 384. */
|
||||
uint8_t reserved_21[16];
|
||||
uint16_t reserved_22[8];
|
||||
|
||||
/* Offset 416. */
|
||||
uint16_t reserved_23[32];
|
||||
|
||||
/* Offset 480. */
|
||||
uint8_t model_name[16];
|
||||
|
||||
/* Offset 496. */
|
||||
uint16_t feature_mask_l;
|
||||
uint16_t feature_mask_h;
|
||||
uint16_t reserved_24[2];
|
||||
|
||||
uint16_t subsystem_vendor_id;
|
||||
uint16_t subsystem_device_id;
|
||||
|
||||
uint32_t checksum;
|
||||
};
|
||||
|
||||
/*
|
||||
* ISP Initialization Control Block.
|
||||
* Little endian except where noted.
|
||||
*/
|
||||
#define ICB_VERSION 1
|
||||
struct init_cb_81xx {
|
||||
uint16_t version;
|
||||
uint16_t reserved_1;
|
||||
|
||||
uint16_t frame_payload_size;
|
||||
uint16_t execution_throttle;
|
||||
uint16_t exchange_count;
|
||||
|
||||
uint16_t reserved_2;
|
||||
|
||||
uint8_t port_name[WWN_SIZE]; /* Big endian. */
|
||||
uint8_t node_name[WWN_SIZE]; /* Big endian. */
|
||||
|
||||
uint16_t response_q_inpointer;
|
||||
uint16_t request_q_outpointer;
|
||||
|
||||
uint16_t login_retry_count;
|
||||
|
||||
uint16_t prio_request_q_outpointer;
|
||||
|
||||
uint16_t response_q_length;
|
||||
uint16_t request_q_length;
|
||||
|
||||
uint16_t reserved_3;
|
||||
|
||||
uint16_t prio_request_q_length;
|
||||
|
||||
uint32_t request_q_address[2];
|
||||
uint32_t response_q_address[2];
|
||||
uint32_t prio_request_q_address[2];
|
||||
|
||||
uint8_t reserved_4[8];
|
||||
|
||||
uint16_t atio_q_inpointer;
|
||||
uint16_t atio_q_length;
|
||||
uint32_t atio_q_address[2];
|
||||
|
||||
uint16_t interrupt_delay_timer; /* 100us increments. */
|
||||
uint16_t login_timeout;
|
||||
|
||||
/*
|
||||
* BIT 0-3 = Reserved
|
||||
* BIT 4 = Enable Target Mode
|
||||
* BIT 5 = Disable Initiator Mode
|
||||
* BIT 6 = Reserved
|
||||
* BIT 7 = Reserved
|
||||
*
|
||||
* BIT 8-13 = Reserved
|
||||
* BIT 14 = Node Name Option
|
||||
* BIT 15-31 = Reserved
|
||||
*/
|
||||
uint32_t firmware_options_1;
|
||||
|
||||
/*
|
||||
* BIT 0 = Operation Mode bit 0
|
||||
* BIT 1 = Operation Mode bit 1
|
||||
* BIT 2 = Operation Mode bit 2
|
||||
* BIT 3 = Operation Mode bit 3
|
||||
* BIT 4-7 = Reserved
|
||||
*
|
||||
* BIT 8 = Enable Class 2
|
||||
* BIT 9 = Enable ACK0
|
||||
* BIT 10 = Reserved
|
||||
* BIT 11 = Enable FC-SP Security
|
||||
* BIT 12 = FC Tape Enable
|
||||
* BIT 13 = Reserved
|
||||
* BIT 14 = Enable Target PRLI Control
|
||||
* BIT 15-31 = Reserved
|
||||
*/
|
||||
uint32_t firmware_options_2;
|
||||
|
||||
/*
|
||||
* BIT 0-3 = Reserved
|
||||
* BIT 4 = FCP RSP Payload bit 0
|
||||
* BIT 5 = FCP RSP Payload bit 1
|
||||
* BIT 6 = Enable Receive Out-of-Order data frame handling
|
||||
* BIT 7 = Reserved
|
||||
*
|
||||
* BIT 8 = Reserved
|
||||
* BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling
|
||||
* BIT 10-16 = Reserved
|
||||
* BIT 17 = Enable multiple FCFs
|
||||
* BIT 18-20 = MAC addressing mode
|
||||
* BIT 21-25 = Ethernet data rate
|
||||
* BIT 26 = Enable ethernet header rx IOCB for ATIO q
|
||||
* BIT 27 = Enable ethernet header rx IOCB for response q
|
||||
* BIT 28 = SPMA selection bit 0
|
||||
* BIT 28 = SPMA selection bit 1
|
||||
* BIT 30-31 = Reserved
|
||||
*/
|
||||
uint32_t firmware_options_3;
|
||||
|
||||
uint8_t reserved_5[8];
|
||||
|
||||
uint8_t enode_mac[6];
|
||||
|
||||
uint8_t reserved_6[10];
|
||||
};
|
||||
|
||||
struct mid_init_cb_81xx {
|
||||
struct init_cb_81xx init_cb;
|
||||
|
||||
uint16_t count;
|
||||
uint16_t options;
|
||||
|
||||
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
|
||||
};
|
||||
|
||||
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
|
||||
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
|
||||
|
||||
/* 81XX Flash locations -- occupies second 2MB region. */
|
||||
#define FA_BOOT_CODE_ADDR_81 0x80000
|
||||
#define FA_RISC_CODE_ADDR_81 0xA0000
|
||||
#define FA_FW_AREA_ADDR_81 0xC0000
|
||||
#define FA_VPD_NVRAM_ADDR_81 0xD0000
|
||||
#define FA_FEATURE_ADDR_81 0xD4000
|
||||
#define FA_FLASH_DESCR_ADDR_81 0xD8000
|
||||
#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
|
||||
#define FA_HW_EVENT0_ADDR_81 0xDC000
|
||||
#define FA_HW_EVENT1_ADDR_81 0xDC400
|
||||
#define FA_NPIV_CONF0_ADDR_81 0xD1000
|
||||
#define FA_NPIV_CONF1_ADDR_81 0xD2000
|
||||
|
||||
#endif
|
||||
|
@ -28,8 +28,10 @@ extern void qla2x00_reset_adapter(struct scsi_qla_host *);
|
||||
extern void qla24xx_reset_adapter(struct scsi_qla_host *);
|
||||
extern int qla2x00_nvram_config(struct scsi_qla_host *);
|
||||
extern int qla24xx_nvram_config(struct scsi_qla_host *);
|
||||
extern int qla81xx_nvram_config(struct scsi_qla_host *);
|
||||
extern void qla2x00_update_fw_options(struct scsi_qla_host *);
|
||||
extern void qla24xx_update_fw_options(scsi_qla_host_t *);
|
||||
extern void qla81xx_update_fw_options(scsi_qla_host_t *);
|
||||
extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
|
||||
extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
|
||||
|
||||
@ -69,8 +71,6 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *);
|
||||
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
|
||||
extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
|
||||
fc_host_event_code, u32);
|
||||
extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
|
||||
uint16_t, uint16_t);
|
||||
|
||||
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
|
||||
extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
|
||||
@ -143,7 +143,7 @@ qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
|
||||
|
||||
extern void
|
||||
qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *,
|
||||
uint16_t *, uint16_t *, uint16_t *, uint32_t *);
|
||||
uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *);
|
||||
|
||||
extern int
|
||||
qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
|
||||
@ -317,9 +317,6 @@ extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
|
||||
extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
|
||||
extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
|
||||
|
||||
extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
|
||||
uint16_t, uint16_t);
|
||||
|
||||
extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
|
||||
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
|
||||
|
||||
@ -332,6 +329,7 @@ extern void qla2100_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla2300_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
|
||||
extern void qla2x00_dump_regs(scsi_qla_host_t *);
|
||||
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
|
||||
|
||||
|
@ -1535,7 +1535,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter = (struct ct_fdmi_port_attr *) (entries + size);
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
|
||||
eiter->len = __constant_cpu_to_be16(4 + 4);
|
||||
if (IS_QLA25XX(ha))
|
||||
if (IS_QLA81XX(ha))
|
||||
eiter->a.sup_speed = __constant_cpu_to_be32(
|
||||
FDMI_PORT_SPEED_10GB);
|
||||
else if (IS_QLA25XX(ha))
|
||||
eiter->a.sup_speed = __constant_cpu_to_be32(
|
||||
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
|
||||
FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
|
||||
@ -1575,6 +1578,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
|
||||
eiter->a.cur_speed =
|
||||
__constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
|
||||
break;
|
||||
case PORT_SPEED_10GB:
|
||||
eiter->a.cur_speed =
|
||||
__constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
|
||||
break;
|
||||
default:
|
||||
eiter->a.cur_speed =
|
||||
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
|
||||
|
@ -552,10 +552,6 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
d2 = RD_REG_DWORD(®->ctrl_status);
|
||||
barrier();
|
||||
}
|
||||
if (cnt == 0 || hw_evt)
|
||||
qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
|
||||
RD_REG_WORD(®->mailbox1), RD_REG_WORD(®->mailbox2),
|
||||
RD_REG_WORD(®->mailbox3));
|
||||
|
||||
WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
|
||||
RD_REG_DWORD(®->hccr);
|
||||
@ -574,6 +570,9 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
if (IS_NOPOLLING_TYPE(ha))
|
||||
ha->isp_ops->enable_intrs(ha);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -779,16 +778,19 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
|
||||
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
|
||||
sizeof(uint16_t);
|
||||
} else if (IS_FWI2_CAPABLE(ha)) {
|
||||
fixed_size = IS_QLA25XX(ha) ?
|
||||
offsetof(struct qla25xx_fw_dump, ext_mem) :
|
||||
offsetof(struct qla24xx_fw_dump, ext_mem);
|
||||
if (IS_QLA81XX(ha))
|
||||
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
|
||||
else if (IS_QLA25XX(ha))
|
||||
fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
|
||||
else
|
||||
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
|
||||
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
|
||||
sizeof(uint32_t);
|
||||
if (ha->mqenable)
|
||||
mq_size = sizeof(struct qla2xxx_mq_chain);
|
||||
|
||||
/* Allocate memory for Fibre Channel Event Buffer. */
|
||||
if (!IS_QLA25XX(ha))
|
||||
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
|
||||
goto try_eft;
|
||||
|
||||
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
|
||||
@ -851,7 +853,9 @@ cont_alloc:
|
||||
|
||||
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
|
||||
dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
|
||||
mq_size + eft_size + fce_size;
|
||||
eft_size;
|
||||
ha->chain_offset = dump_size;
|
||||
dump_size += mq_size + fce_size;
|
||||
|
||||
ha->fw_dump = vmalloc(dump_size);
|
||||
if (!ha->fw_dump) {
|
||||
@ -987,7 +991,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
|
||||
&ha->fw_major_version,
|
||||
&ha->fw_minor_version,
|
||||
&ha->fw_subminor_version,
|
||||
&ha->fw_attributes, &ha->fw_memory_size);
|
||||
&ha->fw_attributes, &ha->fw_memory_size,
|
||||
ha->mpi_version, &ha->mpi_capabilities);
|
||||
ha->flags.npiv_supported = 0;
|
||||
if (IS_QLA2XXX_MIDTYPE(ha) &&
|
||||
(ha->fw_attributes & BIT_2)) {
|
||||
@ -1665,10 +1670,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
|
||||
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
|
||||
"invalid -- WWPN) defaults.\n");
|
||||
|
||||
if (chksum)
|
||||
qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
|
||||
MSW(chksum), LSW(chksum));
|
||||
|
||||
/*
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
@ -4255,3 +4256,269 @@ qla84xx_init_chip(scsi_qla_host_t *vha)
|
||||
return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
|
||||
QLA_SUCCESS;
|
||||
}
|
||||
|
||||
/* 81XX Support **************************************************************/
|
||||
|
||||
int
|
||||
qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval;
|
||||
struct init_cb_81xx *icb;
|
||||
struct nvram_81xx *nv;
|
||||
uint32_t *dptr;
|
||||
uint8_t *dptr1, *dptr2;
|
||||
uint32_t chksum;
|
||||
uint16_t cnt;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
icb = (struct init_cb_81xx *)ha->init_cb;
|
||||
nv = ha->nvram;
|
||||
|
||||
/* Determine NVRAM starting address. */
|
||||
ha->nvram_size = sizeof(struct nvram_81xx);
|
||||
ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
|
||||
ha->vpd_size = FA_NVRAM_VPD_SIZE;
|
||||
ha->vpd_base = FA_NVRAM_VPD0_ADDR;
|
||||
if (PCI_FUNC(ha->pdev->devfn) & 1) {
|
||||
ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
|
||||
ha->vpd_base = FA_NVRAM_VPD1_ADDR;
|
||||
}
|
||||
|
||||
/* Get VPD data into cache */
|
||||
ha->vpd = ha->nvram + VPD_OFFSET;
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
|
||||
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
|
||||
|
||||
/* Get NVRAM data into cache and calculate checksum. */
|
||||
dptr = (uint32_t *)nv;
|
||||
ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
|
||||
ha->nvram_size);
|
||||
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
|
||||
chksum += le32_to_cpu(*dptr++);
|
||||
|
||||
DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
|
||||
DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
|
||||
|
||||
/* Bad NVRAM data, set defaults parameters. */
|
||||
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|
||||
|| nv->id[3] != ' ' ||
|
||||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
|
||||
/* Reset NVRAM data. */
|
||||
qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
|
||||
"checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
|
||||
le16_to_cpu(nv->nvram_version));
|
||||
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
|
||||
"invalid -- WWPN) defaults.\n");
|
||||
|
||||
/*
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
memset(nv, 0, ha->nvram_size);
|
||||
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->frame_payload_size = __constant_cpu_to_le16(2048);
|
||||
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = __constant_cpu_to_le16(0);
|
||||
nv->port_name[0] = 0x21;
|
||||
nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
|
||||
nv->port_name[2] = 0x00;
|
||||
nv->port_name[3] = 0xe0;
|
||||
nv->port_name[4] = 0x8b;
|
||||
nv->port_name[5] = 0x1c;
|
||||
nv->port_name[6] = 0x55;
|
||||
nv->port_name[7] = 0x86;
|
||||
nv->node_name[0] = 0x20;
|
||||
nv->node_name[1] = 0x00;
|
||||
nv->node_name[2] = 0x00;
|
||||
nv->node_name[3] = 0xe0;
|
||||
nv->node_name[4] = 0x8b;
|
||||
nv->node_name[5] = 0x1c;
|
||||
nv->node_name[6] = 0x55;
|
||||
nv->node_name[7] = 0x86;
|
||||
nv->login_retry_count = __constant_cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
|
||||
nv->login_timeout = __constant_cpu_to_le16(0);
|
||||
nv->firmware_options_1 =
|
||||
__constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
|
||||
nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = __constant_cpu_to_le32(0);
|
||||
nv->reset_delay = 5;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(128);
|
||||
nv->port_down_retry_count = __constant_cpu_to_le16(30);
|
||||
nv->link_down_timeout = __constant_cpu_to_le16(30);
|
||||
nv->enode_mac[0] = 0x01;
|
||||
nv->enode_mac[1] = 0x02;
|
||||
nv->enode_mac[2] = 0x03;
|
||||
nv->enode_mac[3] = 0x04;
|
||||
nv->enode_mac[4] = 0x05;
|
||||
nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
|
||||
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
/* Reset Initialization control block */
|
||||
memset(icb, 0, sizeof(struct init_cb_81xx));
|
||||
|
||||
/* Copy 1st segment. */
|
||||
dptr1 = (uint8_t *)icb;
|
||||
dptr2 = (uint8_t *)&nv->version;
|
||||
cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
|
||||
while (cnt--)
|
||||
*dptr1++ = *dptr2++;
|
||||
|
||||
icb->login_retry_count = nv->login_retry_count;
|
||||
|
||||
/* Copy 2nd segment. */
|
||||
dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
|
||||
dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
|
||||
cnt = (uint8_t *)&icb->reserved_5 -
|
||||
(uint8_t *)&icb->interrupt_delay_timer;
|
||||
while (cnt--)
|
||||
*dptr1++ = *dptr2++;
|
||||
|
||||
memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
|
||||
/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
|
||||
if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
|
||||
icb->enode_mac[0] = 0x01;
|
||||
icb->enode_mac[1] = 0x02;
|
||||
icb->enode_mac[2] = 0x03;
|
||||
icb->enode_mac[3] = 0x04;
|
||||
icb->enode_mac[4] = 0x05;
|
||||
icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup driver NVRAM options.
|
||||
*/
|
||||
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
|
||||
"QLE81XX");
|
||||
|
||||
/* Use alternate WWN? */
|
||||
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
|
||||
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
|
||||
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
|
||||
}
|
||||
|
||||
/* Prepare nodename */
|
||||
if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
|
||||
/*
|
||||
* Firmware will apply the following mask if the nodename was
|
||||
* not provided.
|
||||
*/
|
||||
memcpy(icb->node_name, icb->port_name, WWN_SIZE);
|
||||
icb->node_name[0] &= 0xF0;
|
||||
}
|
||||
|
||||
/* Set host adapter parameters. */
|
||||
ha->flags.disable_risc_code_load = 0;
|
||||
ha->flags.enable_lip_reset = 0;
|
||||
ha->flags.enable_lip_full_login =
|
||||
le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
|
||||
ha->flags.enable_target_reset =
|
||||
le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
|
||||
ha->flags.enable_led_scheme = 0;
|
||||
ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
|
||||
|
||||
ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
|
||||
(BIT_6 | BIT_5 | BIT_4)) >> 4;
|
||||
|
||||
/* save HBA serial number */
|
||||
ha->serial0 = icb->port_name[5];
|
||||
ha->serial1 = icb->port_name[6];
|
||||
ha->serial2 = icb->port_name[7];
|
||||
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
|
||||
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
|
||||
|
||||
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
|
||||
ha->retry_count = le16_to_cpu(nv->login_retry_count);
|
||||
|
||||
/* Set minimum login_timeout to 4 seconds. */
|
||||
if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
|
||||
nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
|
||||
if (le16_to_cpu(nv->login_timeout) < 4)
|
||||
nv->login_timeout = __constant_cpu_to_le16(4);
|
||||
ha->login_timeout = le16_to_cpu(nv->login_timeout);
|
||||
icb->login_timeout = nv->login_timeout;
|
||||
|
||||
/* Set minimum RATOV to 100 tenths of a second. */
|
||||
ha->r_a_tov = 100;
|
||||
|
||||
ha->loop_reset_delay = nv->reset_delay;
|
||||
|
||||
/* Link Down Timeout = 0:
|
||||
*
|
||||
* When Port Down timer expires we will start returning
|
||||
* I/O's to OS with "DID_NO_CONNECT".
|
||||
*
|
||||
* Link Down Timeout != 0:
|
||||
*
|
||||
* The driver waits for the link to come up after link down
|
||||
* before returning I/Os to OS with "DID_NO_CONNECT".
|
||||
*/
|
||||
if (le16_to_cpu(nv->link_down_timeout) == 0) {
|
||||
ha->loop_down_abort_time =
|
||||
(LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
|
||||
} else {
|
||||
ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
|
||||
ha->loop_down_abort_time =
|
||||
(LOOP_DOWN_TIME - ha->link_down_timeout);
|
||||
}
|
||||
|
||||
/* Need enough time to try and get the port back. */
|
||||
ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
|
||||
if (qlport_down_retry)
|
||||
ha->port_down_retry_count = qlport_down_retry;
|
||||
|
||||
/* Set login_retry_count */
|
||||
ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
|
||||
if (ha->port_down_retry_count ==
|
||||
le16_to_cpu(nv->port_down_retry_count) &&
|
||||
ha->port_down_retry_count > 3)
|
||||
ha->login_retry_count = ha->port_down_retry_count;
|
||||
else if (ha->port_down_retry_count > (int)ha->login_retry_count)
|
||||
ha->login_retry_count = ha->port_down_retry_count;
|
||||
if (ql2xloginretrycount)
|
||||
ha->login_retry_count = ql2xloginretrycount;
|
||||
|
||||
/* Enable ZIO. */
|
||||
if (!vha->flags.init_done) {
|
||||
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
|
||||
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
|
||||
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
|
||||
le16_to_cpu(icb->interrupt_delay_timer): 2;
|
||||
}
|
||||
icb->firmware_options_2 &= __constant_cpu_to_le32(
|
||||
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
|
||||
vha->flags.process_response_queue = 0;
|
||||
if (ha->zio_mode != QLA_ZIO_DISABLED) {
|
||||
ha->zio_mode = QLA_ZIO_MODE_6;
|
||||
|
||||
DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
|
||||
"(%d us).\n", vha->host_no, ha->zio_mode,
|
||||
ha->zio_timer * 100));
|
||||
qla_printk(KERN_INFO, ha,
|
||||
"ZIO mode %d enabled; timer delay (%d us).\n",
|
||||
ha->zio_mode, ha->zio_timer * 100);
|
||||
|
||||
icb->firmware_options_2 |= cpu_to_le32(
|
||||
(uint32_t)ha->zio_mode);
|
||||
icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
|
||||
vha->flags.process_response_queue = 1;
|
||||
}
|
||||
|
||||
if (rval) {
|
||||
DEBUG2_3(printk(KERN_WARNING
|
||||
"scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
|
||||
}
|
||||
return (rval);
|
||||
}
|
||||
|
||||
void
|
||||
qla81xx_update_fw_options(scsi_qla_host_t *ha)
|
||||
{
|
||||
}
|
||||
|
@ -173,7 +173,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
|
||||
return;
|
||||
}
|
||||
|
||||
vha = sp->vha;
|
||||
vha = sp->fcport->vha;
|
||||
req = sp->que;
|
||||
|
||||
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
|
||||
@ -234,7 +234,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
|
||||
return;
|
||||
}
|
||||
|
||||
vha = sp->vha;
|
||||
vha = sp->fcport->vha;
|
||||
req = sp->que;
|
||||
|
||||
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
|
||||
@ -294,7 +294,7 @@ qla2x00_start_scsi(srb_t *sp)
|
||||
|
||||
/* Setup device pointers. */
|
||||
ret = 0;
|
||||
vha = sp->vha;
|
||||
vha = sp->fcport->vha;
|
||||
ha = vha->hw;
|
||||
reg = &ha->iobase->isp;
|
||||
cmd = sp->cmd;
|
||||
@ -353,7 +353,6 @@ qla2x00_start_scsi(srb_t *sp)
|
||||
/* Build command packet */
|
||||
req->current_outstanding_cmd = handle;
|
||||
req->outstanding_cmds[handle] = sp;
|
||||
sp->vha = vha;
|
||||
sp->que = req;
|
||||
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
|
||||
req->cnt -= req_cnt;
|
||||
@ -656,7 +655,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
|
||||
return;
|
||||
}
|
||||
|
||||
vha = sp->vha;
|
||||
vha = sp->fcport->vha;
|
||||
req = sp->que;
|
||||
|
||||
/* Set transfer direction */
|
||||
@ -723,7 +722,7 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
struct req_que *req = NULL;
|
||||
struct rsp_que *rsp = NULL;
|
||||
struct scsi_cmnd *cmd = sp->cmd;
|
||||
struct scsi_qla_host *vha = sp->vha;
|
||||
struct scsi_qla_host *vha = sp->fcport->vha;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint16_t que_id;
|
||||
|
||||
@ -791,7 +790,6 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
/* Build command packet. */
|
||||
req->current_outstanding_cmd = handle;
|
||||
req->outstanding_cmds[handle] = sp;
|
||||
sp->vha = vha;
|
||||
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
|
||||
req->cnt -= req_cnt;
|
||||
|
||||
|
@ -275,7 +275,7 @@ void
|
||||
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
{
|
||||
#define LS_UNKNOWN 2
|
||||
static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
|
||||
static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
|
||||
char *link_speed;
|
||||
uint16_t handle_cnt;
|
||||
uint16_t cnt;
|
||||
@ -288,6 +288,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
|
||||
/* Setup to process RIO completion. */
|
||||
handle_cnt = 0;
|
||||
if (IS_QLA81XX(ha))
|
||||
goto skip_rio;
|
||||
switch (mb[0]) {
|
||||
case MBA_SCSI_COMPLETION:
|
||||
handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
|
||||
@ -339,7 +341,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
skip_rio:
|
||||
switch (mb[0]) {
|
||||
case MBA_SCSI_COMPLETION: /* Fast Post */
|
||||
if (!vha->flags.online)
|
||||
@ -362,7 +364,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
|
||||
mb[1], mb[2], mb[3]);
|
||||
|
||||
qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
|
||||
ha->isp_ops->fw_dump(vha, 1);
|
||||
|
||||
if (IS_FWI2_CAPABLE(ha)) {
|
||||
@ -387,7 +388,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
vha->host_no));
|
||||
qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
|
||||
|
||||
qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
break;
|
||||
|
||||
@ -396,7 +396,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
vha->host_no));
|
||||
qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
|
||||
|
||||
qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
|
||||
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
|
||||
break;
|
||||
|
||||
@ -436,6 +435,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
link_speed = link_speeds[LS_UNKNOWN];
|
||||
if (mb[1] < 5)
|
||||
link_speed = link_speeds[mb[1]];
|
||||
else if (mb[1] == 0x13)
|
||||
link_speed = link_speeds[5];
|
||||
ha->link_data_rate = mb[1];
|
||||
}
|
||||
|
||||
@ -495,12 +496,17 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
|
||||
break;
|
||||
|
||||
/* case MBA_DCBX_COMPLETE: */
|
||||
case MBA_POINT_TO_POINT: /* Point-to-Point */
|
||||
if (IS_QLA2100(ha))
|
||||
break;
|
||||
|
||||
DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
|
||||
vha->host_no));
|
||||
if (IS_QLA81XX(ha))
|
||||
DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
|
||||
"%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
|
||||
else
|
||||
DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
|
||||
"received.\n", vha->host_no));
|
||||
|
||||
/*
|
||||
* Until there's a transition from loop down to loop up, treat
|
||||
@ -641,10 +647,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
|
||||
/* case MBA_RIO_RESPONSE: */
|
||||
case MBA_ZIO_RESPONSE:
|
||||
DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
|
||||
vha->host_no));
|
||||
DEBUG(printk(KERN_INFO
|
||||
"scsi(%ld): [R|Z]IO update completion.\n",
|
||||
DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
|
||||
vha->host_no));
|
||||
|
||||
if (IS_FWI2_CAPABLE(ha))
|
||||
@ -698,6 +701,35 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
|
||||
}
|
||||
spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
|
||||
break;
|
||||
case MBA_DCBX_START:
|
||||
DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
|
||||
vha->host_no, mb[1], mb[2], mb[3]));
|
||||
break;
|
||||
case MBA_DCBX_PARAM_UPDATE:
|
||||
DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
|
||||
"%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
|
||||
break;
|
||||
case MBA_FCF_CONF_ERR:
|
||||
DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
|
||||
"%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
|
||||
break;
|
||||
case MBA_IDC_COMPLETE:
|
||||
DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
|
||||
"Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
|
||||
mb[3]));
|
||||
break;
|
||||
case MBA_IDC_NOTIFY:
|
||||
DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
|
||||
"Request Notification -- %04x %04x %04x\n", vha->host_no,
|
||||
mb[1], mb[2], mb[3]));
|
||||
/**** Mailbox registers 4 - 7 valid!!! */
|
||||
break;
|
||||
case MBA_IDC_TIME_EXT:
|
||||
DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
|
||||
"Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
|
||||
mb[2], mb[3]));
|
||||
/**** Mailbox registers 4 - 7 valid!!! */
|
||||
break;
|
||||
}
|
||||
|
||||
if (!vha->vp_idx && ha->num_vhosts)
|
||||
@ -1510,7 +1542,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
|
||||
if (!IS_QLA25XX(ha))
|
||||
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
|
||||
return;
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
@ -1590,12 +1622,6 @@ qla24xx_intr_handler(int irq, void *dev_id)
|
||||
if (pci_channel_offline(ha->pdev))
|
||||
break;
|
||||
|
||||
if (ha->hw_event_pause_errors == 0)
|
||||
qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
|
||||
0, MSW(stat), LSW(stat));
|
||||
else if (ha->hw_event_pause_errors < 0xffffffff)
|
||||
ha->hw_event_pause_errors++;
|
||||
|
||||
hccr = RD_REG_DWORD(®->hccr);
|
||||
|
||||
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
|
||||
@ -1740,12 +1766,6 @@ qla24xx_msix_default(int irq, void *dev_id)
|
||||
if (pci_channel_offline(ha->pdev))
|
||||
break;
|
||||
|
||||
if (ha->hw_event_pause_errors == 0)
|
||||
qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
|
||||
0, MSW(stat), LSW(stat));
|
||||
else if (ha->hw_event_pause_errors < 0xffffffff)
|
||||
ha->hw_event_pause_errors++;
|
||||
|
||||
hccr = RD_REG_DWORD(®->hccr);
|
||||
|
||||
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
|
||||
@ -1944,7 +1964,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
|
||||
device_reg_t __iomem *reg = ha->iobase;
|
||||
|
||||
/* If possible, enable MSI-X. */
|
||||
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
|
||||
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
|
||||
!IS_QLA8432(ha) && !IS_QLA8001(ha))
|
||||
goto skip_msix;
|
||||
|
||||
if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
|
||||
@ -1979,7 +2000,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
|
||||
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
|
||||
skip_msix:
|
||||
|
||||
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
|
||||
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
|
||||
!IS_QLA8001(ha))
|
||||
goto skip_msi;
|
||||
|
||||
ret = pci_enable_msi(ha->pdev);
|
||||
@ -2000,6 +2022,12 @@ skip_msi:
|
||||
ha->flags.inta_enabled = 1;
|
||||
clear_risc_ints:
|
||||
|
||||
/*
|
||||
* FIXME: Noted that 8014s were being dropped during NK testing.
|
||||
* Timing deltas during MSI-X/INTa transitions?
|
||||
*/
|
||||
if (IS_QLA81XX(ha))
|
||||
goto fail;
|
||||
spin_lock_irq(&ha->hardware_lock);
|
||||
if (IS_FWI2_CAPABLE(ha)) {
|
||||
WRT_REG_DWORD(®->isp24.hccr, HCCRX_CLR_HOST_INT);
|
||||
@ -2044,7 +2072,7 @@ qla2x00_get_rsp_host(struct rsp_que *rsp)
|
||||
if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
|
||||
sp = req->outstanding_cmds[pkt->handle];
|
||||
if (sp)
|
||||
vha = sp->vha;
|
||||
vha = sp->fcport->vha;
|
||||
}
|
||||
}
|
||||
if (!vha)
|
||||
|
@ -123,8 +123,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
|
||||
|
||||
/* Wait for mbx cmd completion until timeout */
|
||||
|
||||
if (!abort_active && io_lock_on) {
|
||||
|
||||
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
|
||||
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
|
||||
|
||||
if (IS_FWI2_CAPABLE(ha))
|
||||
@ -218,7 +217,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
|
||||
/* Clean up */
|
||||
ha->mcp = NULL;
|
||||
|
||||
if (abort_active || !io_lock_on) {
|
||||
if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
|
||||
DEBUG11(printk("%s(%ld): checking for additional resp "
|
||||
"interrupt.\n", __func__, base_vha->host_no));
|
||||
|
||||
@ -412,7 +411,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
|
||||
*/
|
||||
void
|
||||
qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
|
||||
uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
|
||||
uint32_t *mpi_caps)
|
||||
{
|
||||
int rval;
|
||||
mbx_cmd_t mc;
|
||||
@ -423,6 +423,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
|
||||
mcp->out_mb = MBX_0;
|
||||
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
|
||||
if (IS_QLA81XX(vha->hw))
|
||||
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
|
||||
mcp->flags = 0;
|
||||
mcp->tov = MBX_TOV_SECONDS;
|
||||
rval = qla2x00_mailbox_command(vha, mcp);
|
||||
@ -436,6 +438,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
|
||||
*memory = 0x1FFFF; /* Defaults to 128KB. */
|
||||
else
|
||||
*memory = (mcp->mb[5] << 16) | mcp->mb[4];
|
||||
if (IS_QLA81XX(vha->hw)) {
|
||||
mpi[0] = mcp->mb[10] >> 8;
|
||||
mpi[1] = mcp->mb[10] & 0xff;
|
||||
mpi[2] = mcp->mb[11] >> 8;
|
||||
mpi[3] = mcp->mb[11] & 0xff;
|
||||
*mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
|
||||
}
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
/*EMPTY*/
|
||||
@ -568,7 +577,6 @@ int
|
||||
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
@ -595,14 +603,6 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
|
||||
if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
|
||||
mcp->mb[7] != 0x2525)
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
if (rval == QLA_FUNCTION_FAILED) {
|
||||
struct device_reg_24xx __iomem *reg =
|
||||
&ha->iobase->isp24;
|
||||
|
||||
qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
|
||||
LSW(RD_REG_DWORD(®->hccr)),
|
||||
LSW(RD_REG_DWORD(®->istatus)));
|
||||
}
|
||||
}
|
||||
|
||||
if (rval != QLA_SUCCESS) {
|
||||
@ -1363,7 +1363,13 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
|
||||
if (IS_FWI2_CAPABLE(vha->hw)) {
|
||||
if (IS_QLA81XX(vha->hw)) {
|
||||
/* Logout across all FCFs. */
|
||||
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
|
||||
mcp->mb[1] = BIT_1;
|
||||
mcp->mb[2] = 0;
|
||||
mcp->out_mb = MBX_2|MBX_1|MBX_0;
|
||||
} else if (IS_FWI2_CAPABLE(vha->hw)) {
|
||||
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
|
||||
mcp->mb[1] = BIT_6;
|
||||
mcp->mb[2] = 0;
|
||||
@ -1853,6 +1859,9 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (IS_QLA81XX(vha->hw))
|
||||
return QLA_SUCCESS;
|
||||
|
||||
DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
|
||||
vha->host_no));
|
||||
|
||||
@ -2512,7 +2521,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA25XX(vha->hw))
|
||||
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
|
||||
@ -3155,7 +3164,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
|
||||
mcp->mb[7] = LSW(MSD(rsp->dma));
|
||||
mcp->mb[5] = rsp->length;
|
||||
mcp->mb[11] = rsp->vp_idx;
|
||||
mcp->mb[14] = rsp->msix->vector;
|
||||
mcp->mb[14] = rsp->msix->entry;
|
||||
mcp->mb[13] = rsp->rid;
|
||||
|
||||
reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
|
||||
|
@ -614,8 +614,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
|
||||
req->vp_idx = vp_idx;
|
||||
req->qos = qos;
|
||||
|
||||
if (ha->rsp_q_map[rsp_que])
|
||||
if (ha->rsp_q_map[rsp_que]) {
|
||||
req->rsp = ha->rsp_q_map[rsp_que];
|
||||
req->rsp->req = req;
|
||||
}
|
||||
/* Use alternate PCI bus number */
|
||||
if (MSB(req->rid))
|
||||
options |= BIT_4;
|
||||
|
@ -404,26 +404,9 @@ static char *
|
||||
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
|
||||
ha->fw_minor_version,
|
||||
ha->fw_subminor_version);
|
||||
|
||||
if (ha->fw_attributes & BIT_0)
|
||||
strcat(str, "[Class 2] ");
|
||||
if (ha->fw_attributes & BIT_1)
|
||||
strcat(str, "[IP] ");
|
||||
if (ha->fw_attributes & BIT_2)
|
||||
strcat(str, "[Multi-ID] ");
|
||||
if (ha->fw_attributes & BIT_3)
|
||||
strcat(str, "[SB-2] ");
|
||||
if (ha->fw_attributes & BIT_4)
|
||||
strcat(str, "[T10 CRC] ");
|
||||
if (ha->fw_attributes & BIT_5)
|
||||
strcat(str, "[VI] ");
|
||||
if (ha->fw_attributes & BIT_10)
|
||||
strcat(str, "[84XX] ");
|
||||
if (ha->fw_attributes & BIT_13)
|
||||
strcat(str, "[Experimental]");
|
||||
sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
|
||||
ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
|
||||
return str;
|
||||
}
|
||||
|
||||
@ -438,7 +421,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
|
||||
if (!sp)
|
||||
return sp;
|
||||
|
||||
sp->vha = vha;
|
||||
sp->fcport = fcport;
|
||||
sp->cmd = cmd;
|
||||
sp->que = ha->req_q_map[0];
|
||||
@ -1182,7 +1164,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
|
||||
continue;
|
||||
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
|
||||
sp = req->outstanding_cmds[cnt];
|
||||
if (sp && sp->vha == vha) {
|
||||
if (sp && sp->fcport->vha == vha) {
|
||||
req->outstanding_cmds[cnt] = NULL;
|
||||
sp->cmd->result = res;
|
||||
qla2x00_sp_compl(ha, sp);
|
||||
@ -1329,6 +1311,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
|
||||
unsigned long flags = 0;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
|
||||
if (IS_NOPOLLING_TYPE(ha))
|
||||
return;
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
ha->interrupts_on = 0;
|
||||
WRT_REG_DWORD(®->ictrl, 0);
|
||||
@ -1488,6 +1472,44 @@ static struct isp_operations qla25xx_isp_ops = {
|
||||
.rd_req_reg = qla24xx_rd_req_reg,
|
||||
};
|
||||
|
||||
static struct isp_operations qla81xx_isp_ops = {
|
||||
.pci_config = qla25xx_pci_config,
|
||||
.reset_chip = qla24xx_reset_chip,
|
||||
.chip_diag = qla24xx_chip_diag,
|
||||
.config_rings = qla24xx_config_rings,
|
||||
.reset_adapter = qla24xx_reset_adapter,
|
||||
.nvram_config = qla81xx_nvram_config,
|
||||
.update_fw_options = qla81xx_update_fw_options,
|
||||
.load_risc = qla24xx_load_risc,
|
||||
.pci_info_str = qla24xx_pci_info_str,
|
||||
.fw_version_str = qla24xx_fw_version_str,
|
||||
.intr_handler = qla24xx_intr_handler,
|
||||
.enable_intrs = qla24xx_enable_intrs,
|
||||
.disable_intrs = qla24xx_disable_intrs,
|
||||
.abort_command = qla24xx_abort_command,
|
||||
.target_reset = qla24xx_abort_target,
|
||||
.lun_reset = qla24xx_lun_reset,
|
||||
.fabric_login = qla24xx_login_fabric,
|
||||
.fabric_logout = qla24xx_fabric_logout,
|
||||
.calc_req_entries = NULL,
|
||||
.build_iocbs = NULL,
|
||||
.prep_ms_iocb = qla24xx_prep_ms_iocb,
|
||||
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
|
||||
.read_nvram = qla25xx_read_nvram_data,
|
||||
.write_nvram = qla25xx_write_nvram_data,
|
||||
.fw_dump = qla81xx_fw_dump,
|
||||
.beacon_on = qla24xx_beacon_on,
|
||||
.beacon_off = qla24xx_beacon_off,
|
||||
.beacon_blink = qla24xx_beacon_blink,
|
||||
.read_optrom = qla25xx_read_optrom_data,
|
||||
.write_optrom = qla24xx_write_optrom_data,
|
||||
.get_flash_version = qla24xx_get_flash_version,
|
||||
.start_scsi = qla24xx_start_scsi,
|
||||
.wrt_req_reg = qla24xx_wrt_req_reg,
|
||||
.wrt_rsp_reg = qla24xx_wrt_rsp_reg,
|
||||
.rd_req_reg = qla24xx_rd_req_reg,
|
||||
};
|
||||
|
||||
static inline void
|
||||
qla2x00_set_isp_flags(struct qla_hw_data *ha)
|
||||
{
|
||||
@ -1567,6 +1589,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
case PCI_DEVICE_ID_QLOGIC_ISP8001:
|
||||
ha->device_type |= DT_ISP8001;
|
||||
ha->device_type |= DT_ZIO_SUPPORTED;
|
||||
ha->device_type |= DT_FWI2;
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1629,7 +1658,7 @@ skip_pio:
|
||||
|
||||
/* Determine queue resources */
|
||||
ha->max_queues = 1;
|
||||
if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
|
||||
if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
|
||||
goto mqiobase_exit;
|
||||
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
|
||||
pci_resource_len(ha->pdev, 3));
|
||||
@ -1706,7 +1735,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) {
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
sht = &qla24xx_driver_template;
|
||||
mem_only = 1;
|
||||
@ -1760,6 +1790,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
rsp_length = RESPONSE_ENTRY_CNT_2100;
|
||||
ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
|
||||
ha->gid_list_info_size = 4;
|
||||
ha->flash_conf_off = ~0;
|
||||
ha->flash_data_off = ~0;
|
||||
ha->nvram_conf_off = ~0;
|
||||
ha->nvram_data_off = ~0;
|
||||
ha->isp_ops = &qla2100_isp_ops;
|
||||
} else if (IS_QLA2200(ha)) {
|
||||
ha->mbx_count = MAILBOX_REGISTER_COUNT;
|
||||
@ -1767,6 +1801,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
rsp_length = RESPONSE_ENTRY_CNT_2100;
|
||||
ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
|
||||
ha->gid_list_info_size = 4;
|
||||
ha->flash_conf_off = ~0;
|
||||
ha->flash_data_off = ~0;
|
||||
ha->nvram_conf_off = ~0;
|
||||
ha->nvram_data_off = ~0;
|
||||
ha->isp_ops = &qla2100_isp_ops;
|
||||
} else if (IS_QLA23XX(ha)) {
|
||||
ha->mbx_count = MAILBOX_REGISTER_COUNT;
|
||||
@ -1776,6 +1814,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
ha->gid_list_info_size = 6;
|
||||
if (IS_QLA2322(ha) || IS_QLA6322(ha))
|
||||
ha->optrom_size = OPTROM_SIZE_2322;
|
||||
ha->flash_conf_off = ~0;
|
||||
ha->flash_data_off = ~0;
|
||||
ha->nvram_conf_off = ~0;
|
||||
ha->nvram_data_off = ~0;
|
||||
ha->isp_ops = &qla2300_isp_ops;
|
||||
} else if (IS_QLA24XX_TYPE(ha)) {
|
||||
ha->mbx_count = MAILBOX_REGISTER_COUNT;
|
||||
@ -1787,6 +1829,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
ha->optrom_size = OPTROM_SIZE_24XX;
|
||||
ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
|
||||
ha->isp_ops = &qla24xx_isp_ops;
|
||||
ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
|
||||
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
|
||||
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
|
||||
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
|
||||
} else if (IS_QLA25XX(ha)) {
|
||||
ha->mbx_count = MAILBOX_REGISTER_COUNT;
|
||||
req_length = REQUEST_ENTRY_CNT_24XX;
|
||||
@ -1797,6 +1843,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
ha->optrom_size = OPTROM_SIZE_25XX;
|
||||
ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
|
||||
ha->isp_ops = &qla25xx_isp_ops;
|
||||
ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
|
||||
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
|
||||
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
|
||||
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
|
||||
} else if (IS_QLA81XX(ha)) {
|
||||
ha->mbx_count = MAILBOX_REGISTER_COUNT;
|
||||
req_length = REQUEST_ENTRY_CNT_24XX;
|
||||
rsp_length = RESPONSE_ENTRY_CNT_2300;
|
||||
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
|
||||
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
|
||||
ha->gid_list_info_size = 8;
|
||||
ha->optrom_size = OPTROM_SIZE_81XX;
|
||||
ha->isp_ops = &qla81xx_isp_ops;
|
||||
ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
|
||||
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
|
||||
ha->nvram_conf_off = ~0;
|
||||
ha->nvram_data_off = ~0;
|
||||
}
|
||||
|
||||
mutex_init(&ha->vport_lock);
|
||||
@ -2458,23 +2521,6 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
|
||||
return qla2x00_post_work(vha, e, 1);
|
||||
}
|
||||
|
||||
int
|
||||
qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
|
||||
uint16_t d2, uint16_t d3)
|
||||
{
|
||||
struct qla_work_evt *e;
|
||||
|
||||
e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
|
||||
if (!e)
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
e->u.hwe.code = code;
|
||||
e->u.hwe.d1 = d1;
|
||||
e->u.hwe.d2 = d2;
|
||||
e->u.hwe.d3 = d3;
|
||||
return qla2x00_post_work(vha, e, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
qla2x00_do_work(struct scsi_qla_host *vha)
|
||||
{
|
||||
@ -2492,10 +2538,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
|
||||
fc_host_post_event(vha->host, fc_get_event_number(),
|
||||
e->u.aen.code, e->u.aen.data);
|
||||
break;
|
||||
case QLA_EVT_HWE_LOG:
|
||||
qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
|
||||
e->u.hwe.d2, e->u.hwe.d3);
|
||||
break;
|
||||
}
|
||||
if (e->flags & QLA_EVT_FLAG_FREE)
|
||||
kfree(e);
|
||||
@ -2914,13 +2956,14 @@ qla2x00_timer(scsi_qla_host_t *vha)
|
||||
|
||||
/* Firmware interface routines. */
|
||||
|
||||
#define FW_BLOBS 6
|
||||
#define FW_BLOBS 7
|
||||
#define FW_ISP21XX 0
|
||||
#define FW_ISP22XX 1
|
||||
#define FW_ISP2300 2
|
||||
#define FW_ISP2322 3
|
||||
#define FW_ISP24XX 4
|
||||
#define FW_ISP25XX 5
|
||||
#define FW_ISP81XX 6
|
||||
|
||||
#define FW_FILE_ISP21XX "ql2100_fw.bin"
|
||||
#define FW_FILE_ISP22XX "ql2200_fw.bin"
|
||||
@ -2928,6 +2971,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
|
||||
#define FW_FILE_ISP2322 "ql2322_fw.bin"
|
||||
#define FW_FILE_ISP24XX "ql2400_fw.bin"
|
||||
#define FW_FILE_ISP25XX "ql2500_fw.bin"
|
||||
#define FW_FILE_ISP81XX "ql8100_fw.bin"
|
||||
|
||||
static DEFINE_MUTEX(qla_fw_lock);
|
||||
|
||||
@ -2938,6 +2982,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
|
||||
{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
|
||||
{ .name = FW_FILE_ISP24XX, },
|
||||
{ .name = FW_FILE_ISP25XX, },
|
||||
{ .name = FW_FILE_ISP81XX, },
|
||||
};
|
||||
|
||||
struct fw_blob *
|
||||
@ -2959,6 +3004,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
|
||||
blob = &qla_fw_blobs[FW_ISP24XX];
|
||||
} else if (IS_QLA25XX(ha)) {
|
||||
blob = &qla_fw_blobs[FW_ISP25XX];
|
||||
} else if (IS_QLA81XX(ha)) {
|
||||
blob = &qla_fw_blobs[FW_ISP81XX];
|
||||
}
|
||||
|
||||
mutex_lock(&qla_fw_lock);
|
||||
@ -3112,6 +3159,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
|
||||
{ 0 },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
|
||||
@ -3200,3 +3248,4 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
|
||||
MODULE_FIRMWARE(FW_FILE_ISP2322);
|
||||
MODULE_FIRMWARE(FW_FILE_ISP24XX);
|
||||
MODULE_FIRMWARE(FW_FILE_ISP25XX);
|
||||
MODULE_FIRMWARE(FW_FILE_ISP81XX);
|
||||
|
@ -425,27 +425,27 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
|
||||
#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
|
||||
|
||||
static inline uint32_t
|
||||
flash_conf_to_access_addr(uint32_t faddr)
|
||||
flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
|
||||
{
|
||||
return FARX_ACCESS_FLASH_CONF | faddr;
|
||||
return ha->flash_conf_off | faddr;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
flash_data_to_access_addr(uint32_t faddr)
|
||||
flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
|
||||
{
|
||||
return FARX_ACCESS_FLASH_DATA | faddr;
|
||||
return ha->flash_data_off | faddr;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
nvram_conf_to_access_addr(uint32_t naddr)
|
||||
nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
|
||||
{
|
||||
return FARX_ACCESS_NVRAM_CONF | naddr;
|
||||
return ha->nvram_conf_off | naddr;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
nvram_data_to_access_addr(uint32_t naddr)
|
||||
nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
|
||||
{
|
||||
return FARX_ACCESS_NVRAM_DATA | naddr;
|
||||
return ha->nvram_data_off | naddr;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
@ -481,10 +481,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
uint32_t dwords)
|
||||
{
|
||||
uint32_t i;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
/* Dword reads to flash. */
|
||||
for (i = 0; i < dwords; i++, faddr++)
|
||||
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
|
||||
flash_data_to_access_addr(faddr)));
|
||||
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
|
||||
flash_data_addr(ha, faddr)));
|
||||
|
||||
return dwptr;
|
||||
}
|
||||
@ -518,7 +520,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
|
||||
{
|
||||
uint32_t ids;
|
||||
|
||||
ids = qla24xx_read_flash_dword(ha, flash_data_to_access_addr(0xd03ab));
|
||||
ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
|
||||
*man_id = LSB(ids);
|
||||
*flash_id = MSB(ids);
|
||||
|
||||
@ -530,8 +532,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
|
||||
* Example: ATMEL 0x00 01 45 1F
|
||||
* Extract MFG and Dev ID from last two bytes.
|
||||
*/
|
||||
ids = qla24xx_read_flash_dword(ha,
|
||||
flash_data_to_access_addr(0xd009f));
|
||||
ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
|
||||
*man_id = LSB(ids);
|
||||
*flash_id = MSB(ids);
|
||||
}
|
||||
@ -555,9 +556,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
|
||||
|
||||
/* Begin with sane defaults. */
|
||||
loc = locations[0];
|
||||
*start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
|
||||
FA_FLASH_LAYOUT_ADDR;
|
||||
|
||||
*start = 0;
|
||||
if (IS_QLA24XX_TYPE(ha))
|
||||
*start = FA_FLASH_LAYOUT_ADDR_24;
|
||||
else if (IS_QLA25XX(ha))
|
||||
*start = FA_FLASH_LAYOUT_ADDR;
|
||||
else if (IS_QLA81XX(ha))
|
||||
*start = FA_FLASH_LAYOUT_ADDR_81;
|
||||
/* Begin with first PCI expansion ROM header. */
|
||||
buf = (uint8_t *)req->ring;
|
||||
dcode = (uint32_t *)req->ring;
|
||||
@ -618,6 +623,22 @@ static void
|
||||
qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
{
|
||||
const char *loc, *locations[] = { "DEF", "FLT" };
|
||||
const uint32_t def_fw[] =
|
||||
{ FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
|
||||
const uint32_t def_boot[] =
|
||||
{ FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
|
||||
const uint32_t def_vpd_nvram[] =
|
||||
{ FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
|
||||
const uint32_t def_fdt[] =
|
||||
{ FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
|
||||
FA_FLASH_DESCR_ADDR_81 };
|
||||
const uint32_t def_npiv_conf0[] =
|
||||
{ FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
|
||||
FA_NPIV_CONF0_ADDR_81 };
|
||||
const uint32_t def_npiv_conf1[] =
|
||||
{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
|
||||
FA_NPIV_CONF1_ADDR_81 };
|
||||
uint32_t def;
|
||||
uint16_t *wptr;
|
||||
uint16_t cnt, chksum;
|
||||
uint32_t start;
|
||||
@ -676,20 +697,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
case FLT_REG_FDT:
|
||||
ha->flt_region_fdt = start;
|
||||
break;
|
||||
case FLT_REG_HW_EVENT_0:
|
||||
if (!PCI_FUNC(ha->pdev->devfn))
|
||||
ha->flt_region_hw_event = start;
|
||||
break;
|
||||
case FLT_REG_HW_EVENT_1:
|
||||
if (PCI_FUNC(ha->pdev->devfn))
|
||||
ha->flt_region_hw_event = start;
|
||||
break;
|
||||
case FLT_REG_NPIV_CONF_0:
|
||||
if (!PCI_FUNC(ha->pdev->devfn))
|
||||
if (!(PCI_FUNC(ha->pdev->devfn) & 1))
|
||||
ha->flt_region_npiv_conf = start;
|
||||
break;
|
||||
case FLT_REG_NPIV_CONF_1:
|
||||
if (PCI_FUNC(ha->pdev->devfn))
|
||||
if (PCI_FUNC(ha->pdev->devfn) & 1)
|
||||
ha->flt_region_npiv_conf = start;
|
||||
break;
|
||||
}
|
||||
@ -699,22 +712,24 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
no_flash_data:
|
||||
/* Use hardcoded defaults. */
|
||||
loc = locations[0];
|
||||
ha->flt_region_fw = FA_RISC_CODE_ADDR;
|
||||
ha->flt_region_boot = FA_BOOT_CODE_ADDR;
|
||||
ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
|
||||
ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
|
||||
FA_FLASH_DESCR_ADDR;
|
||||
ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
|
||||
FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
|
||||
ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
|
||||
(IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
|
||||
(IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
|
||||
def = 0;
|
||||
if (IS_QLA24XX_TYPE(ha))
|
||||
def = 0;
|
||||
else if (IS_QLA25XX(ha))
|
||||
def = 1;
|
||||
else if (IS_QLA81XX(ha))
|
||||
def = 2;
|
||||
ha->flt_region_fw = def_fw[def];
|
||||
ha->flt_region_boot = def_boot[def];
|
||||
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
|
||||
ha->flt_region_fdt = def_fdt[def];
|
||||
ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
|
||||
def_npiv_conf0[def]: def_npiv_conf1[def];
|
||||
done:
|
||||
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
|
||||
"vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
|
||||
"vpd_nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x.\n", loc,
|
||||
ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
|
||||
ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
|
||||
ha->flt_region_npiv_conf));
|
||||
ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf));
|
||||
}
|
||||
|
||||
static void
|
||||
@ -757,14 +772,14 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
|
||||
mid = le16_to_cpu(fdt->man_id);
|
||||
fid = le16_to_cpu(fdt->id);
|
||||
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
|
||||
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
|
||||
ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
|
||||
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
|
||||
if (fdt->unprotect_sec_cmd) {
|
||||
ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 |
|
||||
ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
|
||||
fdt->unprotect_sec_cmd);
|
||||
ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
|
||||
flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
|
||||
flash_conf_to_access_addr(0x0336);
|
||||
flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
|
||||
flash_conf_addr(ha, 0x0336);
|
||||
}
|
||||
goto done;
|
||||
no_flash_data:
|
||||
@ -773,7 +788,7 @@ no_flash_data:
|
||||
mid = man_id;
|
||||
fid = flash_id;
|
||||
ha->fdt_wrt_disable = 0x9c;
|
||||
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
|
||||
ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
|
||||
switch (man_id) {
|
||||
case 0xbf: /* STT flash. */
|
||||
if (flash_id == 0x8e)
|
||||
@ -782,16 +797,16 @@ no_flash_data:
|
||||
ha->fdt_block_size = FLASH_BLK_SIZE_32K;
|
||||
|
||||
if (flash_id == 0x80)
|
||||
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352);
|
||||
ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
|
||||
break;
|
||||
case 0x13: /* ST M25P80. */
|
||||
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
|
||||
break;
|
||||
case 0x1f: /* Atmel 26DF081A. */
|
||||
ha->fdt_block_size = FLASH_BLK_SIZE_4K;
|
||||
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
|
||||
ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
|
||||
ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
|
||||
ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
|
||||
ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
|
||||
ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
|
||||
break;
|
||||
default:
|
||||
/* Default to 64 kb sector size. */
|
||||
@ -813,7 +828,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
|
||||
uint32_t flt_addr;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
|
||||
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
|
||||
return QLA_SUCCESS;
|
||||
|
||||
ret = qla2xxx_find_flt_start(vha, &flt_addr);
|
||||
@@ -838,7 +853,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 struct qla_npiv_entry *entry;
 struct qla_hw_data *ha = vha->hw;

-if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 return;

 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -930,9 +945,9 @@ qla24xx_unprotect_flash(struct qla_hw_data *ha)
 return;

 /* Disable flash write-protection. */
-qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
 /* Some flash parts need an additional zero-write to clear bits.*/
-qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
 }

 static void
@@ -945,11 +960,10 @@ qla24xx_protect_flash(struct qla_hw_data *ha)
 goto skip_wrt_protect;

 /* Enable flash write-protection and wait for completion. */
-qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101),
+qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
 ha->fdt_wrt_disable);
 for (cnt = 300; cnt &&
-qla24xx_read_flash_dword(ha,
-flash_conf_to_access_addr(0x005)) & BIT_0;
+qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
 cnt--) {
 udelay(10);
 }
@@ -977,7 +991,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 ret = QLA_SUCCESS;

 /* Prepare burst-capable write on supported ISPs. */
-if (IS_QLA25XX(ha) && !(faddr & 0xfff) &&
+if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
 dwords > OPTROM_BURST_DWORDS) {
 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 &optrom_dma, GFP_KERNEL);
@@ -989,7 +1003,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 }

 rest_addr = (ha->fdt_block_size >> 2) - 1;
-sec_mask = 0x80000 - (ha->fdt_block_size >> 2);
+sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2);

 qla24xx_unprotect_flash(ha);

@@ -1024,13 +1038,13 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 *s = cpu_to_le32(*d);

 ret = qla2x00_load_ram(vha, optrom_dma,
-flash_data_to_access_addr(faddr),
+flash_data_addr(ha, faddr),
 OPTROM_BURST_DWORDS);
 if (ret != QLA_SUCCESS) {
 qla_printk(KERN_WARNING, ha,
 "Unable to burst-write optrom segment "
 "(%x/%x/%llx).\n", ret,
-flash_data_to_access_addr(faddr),
+flash_data_addr(ha, faddr),
 (unsigned long long)optrom_dma);
 qla_printk(KERN_WARNING, ha,
 "Reverting to slow-write.\n");
@@ -1047,7 +1061,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 }

 ret = qla24xx_write_flash_dword(ha,
-flash_data_to_access_addr(faddr), cpu_to_le32(*dwptr));
+flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
 if (ret != QLA_SUCCESS) {
 DEBUG9(printk("%s(%ld) Unable to program flash "
 "address=%x data=%x.\n", __func__,
@@ -1098,12 +1112,13 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 {
 uint32_t i;
 uint32_t *dwptr;
+struct qla_hw_data *ha = vha->hw;

 /* Dword reads to flash. */
 dwptr = (uint32_t *)buf;
 for (i = 0; i < bytes >> 2; i++, naddr++)
-dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
-nvram_data_to_access_addr(naddr)));
+dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+nvram_data_addr(ha, naddr)));

 return buf;
 }
@@ -1160,17 +1175,14 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */

 /* Disable NVRAM write-protection. */
-qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-0);
-qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-0);
+qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);

 /* Dword writes to flash. */
 dwptr = (uint32_t *)buf;
 for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
 ret = qla24xx_write_flash_dword(ha,
-nvram_data_to_access_addr(naddr),
-cpu_to_le32(*dwptr));
+nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
 if (ret != QLA_SUCCESS) {
 DEBUG9(qla_printk("Unable to program nvram address=%x "
 "data=%x.\n", naddr, *dwptr));
@@ -1179,8 +1191,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 }

 /* Enable NVRAM write-protection. */
-qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-0x8c);
+qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);

 /* Disable flash write. */
 WRT_REG_DWORD(&reg->ctrl_status,
@@ -1202,8 +1213,7 @@ qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 dwptr = (uint32_t *)buf;
 for (i = 0; i < bytes >> 2; i++, naddr++)
 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-flash_data_to_access_addr(ha->flt_region_vpd_nvram |
-naddr)));
+flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));

 return buf;
 }
@@ -2246,12 +2256,12 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 burst = left;

 rval = qla2x00_dump_ram(vha, optrom_dma,
-flash_data_to_access_addr(faddr), burst);
+flash_data_addr(ha, faddr), burst);
 if (rval) {
 qla_printk(KERN_WARNING, ha,
 "Unable to burst-read optrom segment "
 "(%x/%x/%llx).\n", rval,
-flash_data_to_access_addr(faddr),
+flash_data_addr(ha, faddr),
 (unsigned long long)optrom_dma);
 qla_printk(KERN_WARNING, ha,
 "Reverting to slow-read.\n");
@@ -2648,108 +2658,3 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)

 return 0;
 }
-
-static int
-qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
-{
-uint32_t d[2], faddr;
-struct qla_hw_data *ha = vha->hw;
-
-/* Locate first empty entry. */
-for (;;) {
-if (ha->hw_event_ptr >=
-ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
-DEBUG2(qla_printk(KERN_WARNING, ha,
-"HW event -- Log Full!\n"));
-return QLA_MEMORY_ALLOC_FAILED;
-}
-
-qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
-faddr = flash_data_to_access_addr(ha->hw_event_ptr);
-ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
-if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
-d[1] == __constant_cpu_to_le32(0xffffffff)) {
-qla24xx_unprotect_flash(ha);
-
-qla24xx_write_flash_dword(ha, faddr++,
-cpu_to_le32(jiffies));
-qla24xx_write_flash_dword(ha, faddr++, 0);
-qla24xx_write_flash_dword(ha, faddr++, *fdata++);
-qla24xx_write_flash_dword(ha, faddr++, *fdata);
-
-qla24xx_protect_flash(ha);
-break;
-}
-}
-return QLA_SUCCESS;
-}
-
-int
-qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
-uint16_t d2, uint16_t d3)
-{
-#define QMARK(a, b, c, d) \
-cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
-struct qla_hw_data *ha = vha->hw;
-int rval;
-uint32_t marker[2], fdata[4];
-
-if (ha->flt_region_hw_event == 0)
-return QLA_FUNCTION_FAILED;
-
-DEBUG2(qla_printk(KERN_WARNING, ha,
-"HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3));
-
-/* If marker not already found, locate or write. */
-if (!ha->flags.hw_event_marker_found) {
-/* Create marker. */
-marker[0] = QMARK('L', ha->fw_major_version,
-ha->fw_minor_version, ha->fw_subminor_version);
-marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER,
-QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
-
-/* Locate marker. */
-ha->hw_event_ptr = ha->flt_region_hw_event;
-for (;;) {
-qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
-4);
-if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
-fdata[1] == __constant_cpu_to_le32(0xffffffff))
-break;
-ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
-if (ha->hw_event_ptr >=
-ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
-DEBUG2(qla_printk(KERN_WARNING, ha,
-"HW event -- Log Full!\n"));
-return QLA_MEMORY_ALLOC_FAILED;
-}
-if (fdata[2] == marker[0] && fdata[3] == marker[1]) {
-ha->flags.hw_event_marker_found = 1;
-break;
-}
-}
-/* No marker, write it. */
-if (!ha->flags.hw_event_marker_found) {
-rval = qla2xxx_hw_event_store(vha, marker);
-if (rval != QLA_SUCCESS) {
-DEBUG2(qla_printk(KERN_WARNING, ha,
-"HW event -- Failed marker write=%x.!\n",
-rval));
-return rval;
-}
-ha->flags.hw_event_marker_found = 1;
-}
-}
-
-/* Store error. */
-fdata[0] = cpu_to_le32(code << 16 | d1);
-fdata[1] = cpu_to_le32(d2 << 16 | d3);
-rval = qla2xxx_hw_event_store(vha, fdata);
-if (rval != QLA_SUCCESS) {
-DEBUG2(qla_printk(KERN_WARNING, ha,
-"HW event -- Failed error write=%x.!\n",
-rval));
-}
-
-return rval;
-}

@@ -7,9 +7,9 @@
 /*
 * Driver version
 */
-#define QLA2XXX_VERSION "8.02.03-k1"
+#define QLA2XXX_VERSION "8.03.00-k1"

 #define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 3
+#define QLA_DRIVER_MINOR_VER 3
+#define QLA_DRIVER_PATCH_VER 0
 #define QLA_DRIVER_BETA_VER 0

@@ -237,8 +237,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
 rc->dev.parent = get_device(component_dev);
 rc->num = rd->component_count++;

-snprintf(rc->dev.bus_id, sizeof(rc->dev.bus_id),
-"component-%d", rc->num);
+dev_set_name(&rc->dev, "component-%d", rc->num);
 list_add_tail(&rc->node, &rd->component_list);
 rc->dev.class = &raid_class.class;
 err = device_add(&rc->dev);

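/*
 * Illustrative sketch, not part of the patch above: the raid_class hunk is
 * one instance of a conversion repeated throughout this series, where
 * formatting a name into the fixed-size dev->bus_id array is replaced by
 * dev_set_name(), which lets the driver core allocate and own the name.
 * The helper below is hypothetical and only shows the two styles side by
 * side.
 */
#include <linux/device.h>

static int example_name_and_add(struct device *dev, int num)
{
        int err;

        /* old style, bounded write into the embedded bus_id[] buffer:
         *   snprintf(dev->bus_id, sizeof(dev->bus_id), "component-%d", num);
         */

        /* new style used throughout the patch series: */
        err = dev_set_name(dev, "component-%d", num);
        if (err)
                return err;

        return device_add(dev);
}
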
@@ -651,10 +651,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 unsigned long timeout;
 int rtn = 0;

-/*
- * We will use a queued command if possible, otherwise we will
- * emulate the queuing and calling of completion function ourselves.
- */
 atomic_inc(&cmd->device->iorequest_cnt);

 /* check if the device is still usable */

@@ -2508,7 +2508,7 @@ static void pseudo_0_release(struct device *dev)
 }

 static struct device pseudo_primary = {
-.bus_id = "pseudo_0",
+.init_name = "pseudo_0",
 .release = pseudo_0_release,
 };

@@ -2680,7 +2680,7 @@ static int sdebug_add_adapter(void)
 sdbg_host->dev.bus = &pseudo_lld_bus;
 sdbg_host->dev.parent = &pseudo_primary;
 sdbg_host->dev.release = &sdebug_release_adapter;
-sprintf(sdbg_host->dev.bus_id, "adapter%d", scsi_debug_add_host);
+dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

 error = device_register(&sdbg_host->dev);

@@ -124,34 +124,22 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
 enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
 struct scsi_cmnd *scmd = req->special;
-enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;

 scsi_log_completion(scmd, TIMEOUT_ERROR);

 if (scmd->device->host->transportt->eh_timed_out)
-eh_timed_out = scmd->device->host->transportt->eh_timed_out;
+rtn = scmd->device->host->transportt->eh_timed_out(scmd);
 else if (scmd->device->host->hostt->eh_timed_out)
-eh_timed_out = scmd->device->host->hostt->eh_timed_out;
-else
-eh_timed_out = NULL;
+rtn = scmd->device->host->hostt->eh_timed_out(scmd);

-if (eh_timed_out) {
-rtn = eh_timed_out(scmd);
-switch (rtn) {
-case BLK_EH_NOT_HANDLED:
-break;
-default:
-return rtn;
-}
-}
-
-if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
+if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
 scmd->result |= DID_TIME_OUT << 16;
-return BLK_EH_HANDLED;
+rtn = BLK_EH_HANDLED;
 }

-return BLK_EH_NOT_HANDLED;
+return rtn;
 }

 /**

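/*
 * Illustrative sketch, not part of the patch above: with the reworked
 * scsi_times_out(), the midlayer first consults a transport or host
 * eh_timed_out() hook and only falls back to aborting the command when the
 * hook returns BLK_EH_NOT_HANDLED.  The handler and template below are
 * hypothetical and only show how a driver would plug into that path.
 */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static enum blk_eh_timer_return example_eh_timed_out(struct scsi_cmnd *scmd)
{
        /* Hypothetical policy: grant the hardware more time while the host
         * still has commands outstanding, otherwise let the normal SCSI
         * error handler take over by reporting "not handled". */
        if (scmd->device->host->host_busy)
                return BLK_EH_RESET_TIMER;

        return BLK_EH_NOT_HANDLED;
}

static struct scsi_host_template example_template = {
        .name          = "example",
        .eh_timed_out  = example_eh_timed_out,
};
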
@@ -167,10 +167,17 @@ EXPORT_SYMBOL(scsi_set_medium_removal);
 static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
 {
 struct device *dev = scsi_get_device(sdev->host);
+const char *name;

 if (!dev)
 return -ENXIO;
-return copy_to_user(arg, dev->bus_id, sizeof(dev->bus_id))? -EFAULT: 0;
+
+name = dev_name(dev);
+
+/* compatibility with old ioctl which only returned
+ * 20 characters */
+return copy_to_user(arg, name, min(strlen(name), (size_t)20))
+? -EFAULT: 0;
 }


@@ -91,26 +91,19 @@ static void scsi_unprep_request(struct request *req)
 scsi_put_command(cmd);
 }

-/*
- * Function: scsi_queue_insert()
+/**
+ * __scsi_queue_insert - private queue insertion
+ * @cmd: The SCSI command being requeued
+ * @reason: The reason for the requeue
+ * @unbusy: Whether the queue should be unbusied
 *
- * Purpose: Insert a command in the midlevel queue.
- *
- * Arguments: cmd - command that we are adding to queue.
- * reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns: Nothing.
- *
- * Notes: We do this for one of two cases. Either the host is busy
- * and it cannot accept any more commands for the time being,
- * or the device returned QUEUE_FULL and can accept no more
- * commands.
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
+ * This is a private queue insertion. The public interface
+ * scsi_queue_insert() always assumes the queue should be unbusied
+ * because it's always called before the completion. This function is
+ * for a requeue after completion, which should only occur in this
+ * file.
 */
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 {
 struct Scsi_Host *host = cmd->device->host;
 struct scsi_device *device = cmd->device;
@@ -150,7 +143,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 * Decrement the counters, since these commands are no longer
 * active on the host/device.
 */
-scsi_device_unbusy(device);
+if (unbusy)
+scsi_device_unbusy(device);

 /*
 * Requeue this command. It will go before all other commands
@@ -172,6 +166,29 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 return 0;
 }

+/*
+ * Function: scsi_queue_insert()
+ *
+ * Purpose: Insert a command in the midlevel queue.
+ *
+ * Arguments: cmd - command that we are adding to queue.
+ * reason - why we are inserting command to queue.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns: Nothing.
+ *
+ * Notes: We do this for one of two cases. Either the host is busy
+ * and it cannot accept any more commands for the time being,
+ * or the device returned QUEUE_FULL and can accept no more
+ * commands.
+ * Notes: This could be called either from an interrupt context or a
+ * normal process context.
+ */
+int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+{
+return __scsi_queue_insert(cmd, reason, 1);
+}
 /**
 * scsi_execute - insert request and wait for the result
 * @sdev: scsi device
@@ -684,6 +701,8 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 scsi_run_queue(sdev->request_queue);
 }

+static void __scsi_release_buffers(struct scsi_cmnd *, int);
+
 /*
 * Function: scsi_end_request()
 *
@@ -732,6 +751,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 * leftovers in the front of the
 * queue, and goose the queue again.
 */
+scsi_release_buffers(cmd);
 scsi_requeue_command(q, cmd);
 cmd = NULL;
 }
@@ -743,6 +763,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 * This will goose the queue request function at the end, so we don't
 * need to worry about launching another command.
 */
+__scsi_release_buffers(cmd, 0);
 scsi_next_command(cmd);
 return NULL;
 }
@@ -798,6 +819,26 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }

+static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
+{
+
+if (cmd->sdb.table.nents)
+scsi_free_sgtable(&cmd->sdb);
+
+memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
+struct scsi_data_buffer *bidi_sdb =
+cmd->request->next_rq->special;
+scsi_free_sgtable(bidi_sdb);
+kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+cmd->request->next_rq->special = NULL;
+}
+
+if (scsi_prot_sg_count(cmd))
+scsi_free_sgtable(cmd->prot_sdb);
+}
+
 /*
 * Function: scsi_release_buffers()
 *
@@ -817,21 +858,7 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
-if (cmd->sdb.table.nents)
-scsi_free_sgtable(&cmd->sdb);
-
-memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-if (scsi_bidi_cmnd(cmd)) {
-struct scsi_data_buffer *bidi_sdb =
-cmd->request->next_rq->special;
-scsi_free_sgtable(bidi_sdb);
-kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-cmd->request->next_rq->special = NULL;
-}
-
-if (scsi_prot_sg_count(cmd))
-scsi_free_sgtable(cmd->prot_sdb);
+__scsi_release_buffers(cmd, 1);
 }
 EXPORT_SYMBOL(scsi_release_buffers);

@@ -945,7 +972,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 }

 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
-scsi_release_buffers(cmd);

 /*
 * Next deal with any sectors which we were able to correctly
@@ -963,6 +989,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 return;
 this_count = blk_rq_bytes(req);

+error = -EIO;
+
 if (host_byte(result) == DID_RESET) {
 /* Third party bus reset or reset for error recovery
 * reasons. Just retry the command and see what
@@ -1004,13 +1032,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 /* This will issue a new 6-byte command. */
 cmd->device->use_10_for_rw = 0;
 action = ACTION_REPREP;
+} else if (sshdr.asc == 0x10) /* DIX */ {
+description = "Host Data Integrity Failure";
+action = ACTION_FAIL;
+error = -EILSEQ;
 } else
 action = ACTION_FAIL;
 break;
 case ABORTED_COMMAND:
 if (sshdr.asc == 0x10) { /* DIF */
+description = "Target Data Integrity Failure";
+action = ACTION_FAIL;
-description = "Data Integrity Failure";
 error = -EILSEQ;
 } else
 action = ACTION_RETRY;
 break;
@@ -1029,6 +1062,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 case 0x09: /* self test in progress */
 action = ACTION_DELAYED_RETRY;
 break;
+default:
+description = "Device not ready";
+action = ACTION_FAIL;
+break;
 }
 } else {
 description = "Device not ready";
@@ -1052,9 +1089,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 switch (action) {
 case ACTION_FAIL:
 /* Give up and fail the remainder of the request */
+scsi_release_buffers(cmd);
 if (!(req->cmd_flags & REQ_QUIET)) {
 if (description)
-scmd_printk(KERN_INFO, cmd, "%s",
+scmd_printk(KERN_INFO, cmd, "%s\n",
 description);
 scsi_print_result(cmd);
 if (driver_byte(result) & DRIVER_SENSE)
@@ -1067,15 +1105,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 /* Unprep the request and put it back at the head of the queue.
 * A new command will be prepared and issued.
 */
+scsi_release_buffers(cmd);
 scsi_requeue_command(q, cmd);
 break;
 case ACTION_RETRY:
 /* Retry the same command immediately */
-scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
 break;
 case ACTION_DELAYED_RETRY:
 /* Retry the same command after a delay */
-scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 break;
 }
 }

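/*
 * Illustrative sketch, not part of the patch above: the scsi_lib changes
 * split queue insertion so that a requeue decided in the completion path
 * skips scsi_device_unbusy(), because the busy counters were already dropped
 * when the command completed.  The helper below is hypothetical, assumes it
 * lives in scsi_lib.c next to the static __scsi_queue_insert(), and only
 * contrasts the two call patterns.
 */
static void example_requeue(struct scsi_cmnd *cmd, int completed)
{
        if (!completed)
                /* Rejected before completion (e.g. the LLD reported a busy
                 * host): counters are still elevated, so the public helper
                 * drops them. */
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
        else
                /* Retry decided after completion: counters were already
                 * dropped, so pass unbusy = 0 as scsi_io_completion() does. */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
}
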
@@ -414,8 +414,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 device_initialize(dev);
 starget->reap_ref = 1;
 dev->parent = get_device(parent);
-sprintf(dev->bus_id, "target%d:%d:%d",
-shost->host_no, channel, id);
+dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
 #ifndef CONFIG_SYSFS_DEPRECATED
 dev->bus = &scsi_bus_type;
 #endif
@@ -1024,7 +1023,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 if (rescan || !scsi_device_created(sdev)) {
 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
 "scsi scan: device exists on %s\n",
-sdev->sdev_gendev.bus_id));
+dev_name(&sdev->sdev_gendev)));
 if (sdevp)
 *sdevp = sdev;
 else
@@ -1163,7 +1162,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
-"%s\n", starget->dev.bus_id));
+"%s\n", dev_name(&starget->dev)));

 max_dev_lun = min(max_scsi_luns, shost->max_lun);
 /*

@@ -1079,16 +1079,14 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
 device_initialize(&sdev->sdev_gendev);
 sdev->sdev_gendev.bus = &scsi_bus_type;
 sdev->sdev_gendev.type = &scsi_dev_type;
-sprintf(sdev->sdev_gendev.bus_id,"%d:%d:%d:%d",
-sdev->host->host_no, sdev->channel, sdev->id,
-sdev->lun);
-
+dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
+sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+
 device_initialize(&sdev->sdev_dev);
 sdev->sdev_dev.parent = &sdev->sdev_gendev;
 sdev->sdev_dev.class = &sdev_class;
-snprintf(sdev->sdev_dev.bus_id, BUS_ID_SIZE,
-"%d:%d:%d:%d", sdev->host->host_no,
-sdev->channel, sdev->id, sdev->lun);
+dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
+sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
 sdev->scsi_level = starget->scsi_level;
 transport_setup_device(&sdev->sdev_gendev);
 spin_lock_irqsave(shost->host_lock, flags);

@ -2407,8 +2407,12 @@ fc_rport_final_delete(struct work_struct *work)
|
||||
/*
|
||||
* Notify the driver that the rport is now dead. The LLDD will
|
||||
* also guarantee that any communication to the rport is terminated
|
||||
*
|
||||
* Avoid this call if we already called it when we preserved the
|
||||
* rport for the binding.
|
||||
*/
|
||||
if (i->f->dev_loss_tmo_callbk)
|
||||
if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
|
||||
(i->f->dev_loss_tmo_callbk))
|
||||
i->f->dev_loss_tmo_callbk(rport);
|
||||
|
||||
transport_remove_device(dev);
|
||||
@ -2486,8 +2490,8 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
|
||||
device_initialize(dev); /* takes self reference */
|
||||
dev->parent = get_device(&shost->shost_gendev); /* parent reference */
|
||||
dev->release = fc_rport_dev_release;
|
||||
sprintf(dev->bus_id, "rport-%d:%d-%d",
|
||||
shost->host_no, channel, rport->number);
|
||||
dev_set_name(dev, "rport-%d:%d-%d",
|
||||
shost->host_no, channel, rport->number);
|
||||
transport_setup_device(dev);
|
||||
|
||||
error = device_add(dev);
|
||||
@ -2647,7 +2651,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
|
||||
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
|
||||
FC_RPORT_DEVLOSS_PENDING);
|
||||
FC_RPORT_DEVLOSS_PENDING |
|
||||
FC_RPORT_DEVLOSS_CALLBK_DONE);
|
||||
|
||||
/* if target, initiate a scan */
|
||||
if (rport->scsi_target_id != -1) {
|
||||
@ -2944,6 +2949,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
|
||||
struct fc_rport *rport =
|
||||
container_of(work, struct fc_rport, dev_loss_work.work);
|
||||
struct Scsi_Host *shost = rport_to_shost(rport);
|
||||
struct fc_internal *i = to_fc_internal(shost->transportt);
|
||||
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
|
||||
unsigned long flags;
|
||||
|
||||
@ -3011,6 +3017,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
|
||||
rport->roles = FC_PORT_ROLE_UNKNOWN;
|
||||
rport->port_state = FC_PORTSTATE_NOTPRESENT;
|
||||
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
|
||||
rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
|
||||
|
||||
/*
|
||||
* Pre-emptively kill I/O rather than waiting for the work queue
|
||||
@ -3046,8 +3053,18 @@ fc_timeout_deleted_rport(struct work_struct *work)
|
||||
* all attached scsi devices.
|
||||
*/
|
||||
fc_queue_work(shost, &rport->stgt_delete_work);
|
||||
|
||||
/*
|
||||
* Notify the driver that the rport is now dead. The LLDD will
|
||||
* also guarantee that any communication to the rport is terminated
|
||||
*
|
||||
* Note: we set the CALLBK_DONE flag above to correspond
|
||||
*/
|
||||
if (i->f->dev_loss_tmo_callbk)
|
||||
i->f->dev_loss_tmo_callbk(rport);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
|
||||
* @work: rport to terminate io on.
|
||||
@ -3164,8 +3181,8 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
|
||||
device_initialize(dev); /* takes self reference */
|
||||
dev->parent = get_device(pdev); /* takes parent reference */
|
||||
dev->release = fc_vport_dev_release;
|
||||
sprintf(dev->bus_id, "vport-%d:%d-%d",
|
||||
shost->host_no, channel, vport->number);
|
||||
dev_set_name(dev, "vport-%d:%d-%d",
|
||||
shost->host_no, channel, vport->number);
|
||||
transport_setup_device(dev);
|
||||
|
||||
error = device_add(dev);
|
||||
@ -3188,19 +3205,19 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
|
||||
*/
|
||||
if (pdev != &shost->shost_gendev) {
|
||||
error = sysfs_create_link(&shost->shost_gendev.kobj,
|
||||
&dev->kobj, dev->bus_id);
|
||||
&dev->kobj, dev_name(dev));
|
||||
if (error)
|
||||
printk(KERN_ERR
|
||||
"%s: Cannot create vport symlinks for "
|
||||
"%s, err=%d\n",
|
||||
__func__, dev->bus_id, error);
|
||||
__func__, dev_name(dev), error);
|
||||
}
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
vport->flags &= ~FC_VPORT_CREATING;
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
|
||||
dev_printk(KERN_NOTICE, pdev,
|
||||
"%s created via shost%d channel %d\n", dev->bus_id,
|
||||
"%s created via shost%d channel %d\n", dev_name(dev),
|
||||
shost->host_no, channel);
|
||||
|
||||
*ret_vport = vport;
|
||||
@ -3297,7 +3314,7 @@ fc_vport_terminate(struct fc_vport *vport)
|
||||
return stat;
|
||||
|
||||
if (dev->parent != &shost->shost_gendev)
|
||||
sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
|
||||
sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
|
||||
transport_remove_device(dev);
|
||||
device_del(dev);
|
||||
transport_destroy_device(dev);
|
||||
@ -3329,7 +3346,7 @@ fc_vport_sched_delete(struct work_struct *work)
|
||||
dev_printk(KERN_ERR, vport->dev.parent,
|
||||
"%s: %s could not be deleted created via "
|
||||
"shost%d channel %d - error %d\n", __func__,
|
||||
vport->dev.bus_id, vport->shost->host_no,
|
||||
dev_name(&vport->dev), vport->shost->host_no,
|
||||
vport->channel, stat);
|
||||
}
|
||||
|
||||
|
@ -187,8 +187,7 @@ iscsi_create_endpoint(int dd_size)
|
||||
|
||||
ep->id = id;
|
||||
ep->dev.class = &iscsi_endpoint_class;
|
||||
snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
|
||||
(unsigned long long) id);
|
||||
dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
|
||||
err = device_register(&ep->dev);
|
||||
if (err)
|
||||
goto free_ep;
|
||||
@ -724,8 +723,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
|
||||
}
|
||||
session->target_id = id;
|
||||
|
||||
snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
|
||||
session->sid);
|
||||
dev_set_name(&session->dev, "session%u", session->sid);
|
||||
err = device_add(&session->dev);
|
||||
if (err) {
|
||||
iscsi_cls_session_printk(KERN_ERR, session,
|
||||
@ -898,8 +896,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
|
||||
if (!get_device(&session->dev))
|
||||
goto free_conn;
|
||||
|
||||
snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u",
|
||||
session->sid, cid);
|
||||
dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
|
||||
conn->dev.parent = &session->dev;
|
||||
conn->dev.release = iscsi_conn_release;
|
||||
err = device_register(&conn->dev);
|
||||
@ -1816,7 +1813,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
|
||||
priv->t.create_work_queue = 1;
|
||||
|
||||
priv->dev.class = &iscsi_transport_class;
|
||||
snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
|
||||
dev_set_name(&priv->dev, "%s", tt->name);
|
||||
err = device_register(&priv->dev);
|
||||
if (err)
|
||||
goto free_priv;
|
||||
|
@ -207,7 +207,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
|
||||
struct request_queue *q;
|
||||
int error;
|
||||
struct device *dev;
|
||||
char namebuf[BUS_ID_SIZE];
|
||||
char namebuf[20];
|
||||
const char *name;
|
||||
void (*release)(struct device *);
|
||||
|
||||
@ -219,7 +219,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
|
||||
if (rphy) {
|
||||
q = blk_init_queue(sas_non_host_smp_request, NULL);
|
||||
dev = &rphy->dev;
|
||||
name = dev->bus_id;
|
||||
name = dev_name(dev);
|
||||
release = NULL;
|
||||
} else {
|
||||
q = blk_init_queue(sas_host_smp_request, NULL);
|
||||
@ -629,10 +629,10 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
|
||||
INIT_LIST_HEAD(&phy->port_siblings);
|
||||
if (scsi_is_sas_expander_device(parent)) {
|
||||
struct sas_rphy *rphy = dev_to_rphy(parent);
|
||||
sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no,
|
||||
dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no,
|
||||
rphy->scsi_target_id, number);
|
||||
} else
|
||||
sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);
|
||||
dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number);
|
||||
|
||||
transport_setup_device(&phy->dev);
|
||||
|
||||
@ -770,7 +770,7 @@ static void sas_port_create_link(struct sas_port *port,
|
||||
int res;
|
||||
|
||||
res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
|
||||
phy->dev.bus_id);
|
||||
dev_name(&phy->dev));
|
||||
if (res)
|
||||
goto err;
|
||||
res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
|
||||
@ -785,7 +785,7 @@ err:
|
||||
static void sas_port_delete_link(struct sas_port *port,
|
||||
struct sas_phy *phy)
|
||||
{
|
||||
sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id);
|
||||
sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev));
|
||||
sysfs_remove_link(&phy->dev.kobj, "port");
|
||||
}
|
||||
|
||||
@ -821,11 +821,11 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
|
||||
|
||||
if (scsi_is_sas_expander_device(parent)) {
|
||||
struct sas_rphy *rphy = dev_to_rphy(parent);
|
||||
sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no,
|
||||
rphy->scsi_target_id, port->port_identifier);
|
||||
dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no,
|
||||
rphy->scsi_target_id, port->port_identifier);
|
||||
} else
|
||||
sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no,
|
||||
port->port_identifier);
|
||||
dev_set_name(&port->dev, "port-%d:%d", shost->host_no,
|
||||
port->port_identifier);
|
||||
|
||||
transport_setup_device(&port->dev);
|
||||
|
||||
@ -935,7 +935,7 @@ void sas_port_delete(struct sas_port *port)
|
||||
if (port->is_backlink) {
|
||||
struct device *parent = port->dev.parent;
|
||||
|
||||
sysfs_remove_link(&port->dev.kobj, parent->bus_id);
|
||||
sysfs_remove_link(&port->dev.kobj, dev_name(parent));
|
||||
port->is_backlink = 0;
|
||||
}
|
||||
|
||||
@ -984,7 +984,8 @@ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
|
||||
/* If this trips, you added a phy that was already
|
||||
* part of a different port */
|
||||
if (unlikely(tmp != phy)) {
|
||||
dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id);
|
||||
dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n",
|
||||
dev_name(&phy->dev));
|
||||
BUG();
|
||||
}
|
||||
} else {
|
||||
@ -1023,7 +1024,7 @@ void sas_port_mark_backlink(struct sas_port *port)
|
||||
return;
|
||||
port->is_backlink = 1;
|
||||
res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
|
||||
parent->bus_id);
|
||||
dev_name(parent));
|
||||
if (res)
|
||||
goto err;
|
||||
return;
|
||||
@ -1367,11 +1368,12 @@ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
|
||||
rdev->rphy.dev.release = sas_end_device_release;
|
||||
if (scsi_is_sas_expander_device(parent->dev.parent)) {
|
||||
struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
|
||||
sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d",
|
||||
shost->host_no, rphy->scsi_target_id, parent->port_identifier);
|
||||
dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d",
|
||||
shost->host_no, rphy->scsi_target_id,
|
||||
parent->port_identifier);
|
||||
} else
|
||||
sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d",
|
||||
shost->host_no, parent->port_identifier);
|
||||
dev_set_name(&rdev->rphy.dev, "end_device-%d:%d",
|
||||
shost->host_no, parent->port_identifier);
|
||||
rdev->rphy.identify.device_type = SAS_END_DEVICE;
|
||||
sas_rphy_initialize(&rdev->rphy);
|
||||
transport_setup_device(&rdev->rphy.dev);
|
||||
@ -1411,8 +1413,8 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
|
||||
mutex_lock(&sas_host->lock);
|
||||
rdev->rphy.scsi_target_id = sas_host->next_expander_id++;
|
||||
mutex_unlock(&sas_host->lock);
|
||||
sprintf(rdev->rphy.dev.bus_id, "expander-%d:%d",
|
||||
shost->host_no, rdev->rphy.scsi_target_id);
|
||||
dev_set_name(&rdev->rphy.dev, "expander-%d:%d",
|
||||
shost->host_no, rdev->rphy.scsi_target_id);
|
||||
rdev->rphy.identify.device_type = type;
|
||||
sas_rphy_initialize(&rdev->rphy);
|
||||
transport_setup_device(&rdev->rphy.dev);
|
||||
@ -1445,7 +1447,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
|
||||
transport_add_device(&rphy->dev);
|
||||
transport_configure_device(&rphy->dev);
|
||||
if (sas_bsg_initialize(shost, rphy))
|
||||
printk("fail to a bsg device %s\n", rphy->dev.bus_id);
|
||||
printk("fail to a bsg device %s\n", dev_name(&rphy->dev));
|
||||
|
||||
|
||||
mutex_lock(&sas_host->lock);
|
||||
|
@ -212,7 +212,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
|
||||
rport->roles = ids->roles;
|
||||
|
||||
id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
|
||||
sprintf(rport->dev.bus_id, "port-%d:%d", shost->host_no, id);
|
||||
dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
|
||||
|
||||
transport_setup_device(&rport->dev);
|
||||
|
||||
|
@ -1830,7 +1830,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
|
||||
device_initialize(&sdkp->dev);
|
||||
sdkp->dev.parent = &sdp->sdev_gendev;
|
||||
sdkp->dev.class = &sd_disk_class;
|
||||
strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
|
||||
dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
|
||||
|
||||
if (device_add(&sdkp->dev))
|
||||
goto out_free_index;
|
||||
|
@ -142,7 +142,7 @@ static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
|
||||
static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
|
||||
{
|
||||
struct sd_dif_tuple *sdt = prot;
|
||||
char *tag = tag_buf;
|
||||
u8 *tag = tag_buf;
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
|
||||
@ -154,7 +154,7 @@ static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors
|
||||
static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
|
||||
{
|
||||
struct sd_dif_tuple *sdt = prot;
|
||||
char *tag = tag_buf;
|
||||
u8 *tag = tag_buf;
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
|
||||
@ -256,7 +256,7 @@ static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
|
||||
static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
|
||||
{
|
||||
struct sd_dif_tuple *sdt = prot;
|
||||
char *tag = tag_buf;
|
||||
u8 *tag = tag_buf;
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
|
||||
@ -269,7 +269,7 @@ static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors
|
||||
static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
|
||||
{
|
||||
struct sd_dif_tuple *sdt = prot;
|
||||
char *tag = tag_buf;
|
||||
u8 *tag = tag_buf;
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
|
||||
@ -374,7 +374,10 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
|
||||
else
|
||||
csum_convert = 0;
|
||||
|
||||
BUG_ON(dif && (scmd->cmnd[0] == READ_6 || scmd->cmnd[0] == WRITE_6));
|
||||
|
||||
switch (scmd->cmnd[0]) {
|
||||
case READ_6:
|
||||
case READ_10:
|
||||
case READ_12:
|
||||
case READ_16:
|
||||
@ -390,6 +393,7 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
|
||||
|
||||
break;
|
||||
|
||||
case WRITE_6:
|
||||
case WRITE_10:
|
||||
case WRITE_12:
|
||||
case WRITE_16:
|
||||
@ -475,8 +479,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
|
||||
|
||||
error:
|
||||
kunmap_atomic(sdt, KM_USER0);
|
||||
sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
|
||||
__func__, virt, phys, be32_to_cpu(sdt->ref_tag));
|
||||
sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
|
||||
__func__, virt, phys, be32_to_cpu(sdt->ref_tag),
|
||||
be16_to_cpu(sdt->app_tag));
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -526,7 +526,7 @@ static int ses_intf_add(struct device *cdev,
|
||||
if (!scomp)
|
||||
goto err_free;
|
||||
|
||||
edev = enclosure_register(cdev->parent, sdev->sdev_gendev.bus_id,
|
||||
edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
|
||||
components, &ses_enclosure_callbacks);
|
||||
if (IS_ERR(edev)) {
|
||||
err = PTR_ERR(edev);
|
||||
|
@ -1669,6 +1669,8 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
|
||||
md->pages = req_schp->pages;
|
||||
md->page_order = req_schp->page_order;
|
||||
md->nr_entries = req_schp->k_use_sg;
|
||||
md->offset = 0;
|
||||
md->null_mapped = hp->dxferp ? 0 : 1;
|
||||
}
|
||||
|
||||
if (iov_count)
|
||||
|
@ -297,7 +297,7 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit sgiwd93_remove(struct platform_device *pdev)
|
||||
static int __exit sgiwd93_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct Scsi_Host *host = platform_get_drvdata(pdev);
|
||||
struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
|
||||
@ -307,6 +307,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
|
||||
free_irq(pd->irq, host);
|
||||
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
|
||||
scsi_host_put(host);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver sgiwd93_driver = {
|
||||
|
@ -102,7 +102,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
|
||||
struct NCR_700_Host_Parameters *hostdata =
|
||||
kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
|
||||
|
||||
printk(KERN_NOTICE "sim710: %s\n", dev->bus_id);
|
||||
printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
|
||||
printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
|
||||
irq, clock, base_addr, scsi_id);
|
||||
|
||||
@ -305,7 +305,7 @@ sim710_eisa_probe(struct device *dev)
|
||||
scsi_id = ffs(val) - 1;
|
||||
|
||||
if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
|
||||
printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev->bus_id);
|
||||
printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
|
||||
scsi_id = 7;
|
||||
}
|
||||
} else {
|
||||
|
@ -78,8 +78,7 @@ static int __init snirm710_probe(struct platform_device *dev)
|
||||
base = res->start;
|
||||
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
|
||||
if (!hostdata) {
|
||||
printk(KERN_ERR "%s: Failed to allocate host data\n",
|
||||
dev->dev.bus_id);
|
||||
dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
|
||||
*/
|
||||
|
||||
static const char *verstr = "20080504";
|
||||
static const char *verstr = "20081215";
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
@ -182,18 +182,16 @@ static struct scsi_tape **scsi_tapes = NULL;
|
||||
|
||||
static int modes_defined;
|
||||
|
||||
static struct st_buffer *new_tape_buffer(int, int, int);
|
||||
static int enlarge_buffer(struct st_buffer *, int, int);
|
||||
static void clear_buffer(struct st_buffer *);
|
||||
static void normalize_buffer(struct st_buffer *);
|
||||
static int append_to_buffer(const char __user *, struct st_buffer *, int);
|
||||
static int from_buffer(struct st_buffer *, char __user *, int);
|
||||
static void move_buffer_data(struct st_buffer *, int);
|
||||
static void buf_to_sg(struct st_buffer *, unsigned int);
|
||||
|
||||
static int sgl_map_user_pages(struct scatterlist *, const unsigned int,
|
||||
static int sgl_map_user_pages(struct st_buffer *, const unsigned int,
|
||||
unsigned long, size_t, int);
|
||||
static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int);
|
||||
static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
|
||||
|
||||
static int st_probe(struct device *);
|
||||
static int st_remove(struct device *);
|
||||
@ -435,22 +433,6 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
|
||||
return (-EIO);
|
||||
}
|
||||
|
||||
|
||||
/* Wakeup from interrupt */
|
||||
static void st_sleep_done(void *data, char *sense, int result, int resid)
|
||||
{
|
||||
struct st_request *SRpnt = data;
|
||||
struct scsi_tape *STp = SRpnt->stp;
|
||||
|
||||
memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
|
||||
(STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
|
||||
(STp->buffer)->cmdstat.residual = resid;
|
||||
DEB( STp->write_pending = 0; )
|
||||
|
||||
if (SRpnt->waiting)
|
||||
complete(SRpnt->waiting);
|
||||
}
|
||||
|
||||
static struct st_request *st_allocate_request(struct scsi_tape *stp)
|
||||
{
|
||||
struct st_request *streq;
|
||||
@ -475,6 +457,63 @@ static void st_release_request(struct st_request *streq)
|
||||
kfree(streq);
|
||||
}
|
||||
|
||||
static void st_scsi_execute_end(struct request *req, int uptodate)
|
||||
{
|
||||
struct st_request *SRpnt = req->end_io_data;
|
||||
struct scsi_tape *STp = SRpnt->stp;
|
||||
|
||||
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
|
||||
STp->buffer->cmdstat.residual = req->data_len;
|
||||
|
||||
if (SRpnt->waiting)
|
||||
complete(SRpnt->waiting);
|
||||
|
||||
blk_rq_unmap_user(SRpnt->bio);
|
||||
__blk_put_request(req->q, req);
|
||||
}
|
||||
|
||||
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
|
||||
int data_direction, void *buffer, unsigned bufflen,
|
||||
int timeout, int retries)
|
||||
{
|
||||
struct request *req;
|
||||
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
|
||||
int err = 0;
|
||||
int write = (data_direction == DMA_TO_DEVICE);
|
||||
|
||||
req = blk_get_request(SRpnt->stp->device->request_queue, write,
|
||||
GFP_KERNEL);
|
||||
if (!req)
|
||||
return DRIVER_ERROR << 24;
|
||||
|
||||
req->cmd_type = REQ_TYPE_BLOCK_PC;
|
||||
req->cmd_flags |= REQ_QUIET;
|
||||
|
||||
mdata->null_mapped = 1;
|
||||
|
||||
if (bufflen) {
|
||||
err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
|
||||
GFP_KERNEL);
|
||||
if (err) {
|
||||
blk_put_request(req);
|
||||
return DRIVER_ERROR << 24;
|
||||
}
|
||||
}
|
||||
|
||||
SRpnt->bio = req->bio;
|
||||
req->cmd_len = COMMAND_SIZE(cmd[0]);
|
||||
memset(req->cmd, 0, BLK_MAX_CDB);
|
||||
memcpy(req->cmd, cmd, req->cmd_len);
|
||||
req->sense = SRpnt->sense;
|
||||
req->sense_len = 0;
|
||||
req->timeout = timeout;
|
||||
req->retries = retries;
|
||||
req->end_io_data = SRpnt;
|
||||
|
||||
blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Do the scsi command. Waits until command performed if do_wait is true.
|
||||
Otherwise write_behind_check() is used to check that the command
|
||||
has finished. */
|
||||
@ -483,6 +522,8 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
|
||||
int bytes, int direction, int timeout, int retries, int do_wait)
|
||||
{
|
||||
struct completion *waiting;
|
||||
struct rq_map_data *mdata = &STp->buffer->map_data;
|
||||
int ret;
|
||||
|
||||
/* if async, make sure there's no command outstanding */
|
||||
if (!do_wait && ((STp->buffer)->last_SRpnt)) {
|
||||
@ -510,21 +551,27 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
|
||||
init_completion(waiting);
|
||||
SRpnt->waiting = waiting;
|
||||
|
||||
if (!STp->buffer->do_dio)
|
||||
buf_to_sg(STp->buffer, bytes);
|
||||
if (STp->buffer->do_dio) {
|
||||
mdata->nr_entries = STp->buffer->sg_segs;
|
||||
mdata->pages = STp->buffer->mapped_pages;
|
||||
} else {
|
||||
mdata->nr_entries =
|
||||
DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
|
||||
STp->buffer->map_data.pages = STp->buffer->reserved_pages;
|
||||
STp->buffer->map_data.offset = 0;
|
||||
}
|
||||
|
||||
memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
|
||||
STp->buffer->cmdstat.have_sense = 0;
|
||||
STp->buffer->syscall_result = 0;
|
||||
|
||||
if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction,
|
||||
&((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs,
|
||||
timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) {
|
||||
ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout,
|
||||
retries);
|
||||
if (ret) {
|
||||
/* could not allocate the buffer or request was too large */
|
||||
(STp->buffer)->syscall_result = (-EBUSY);
|
||||
(STp->buffer)->last_SRpnt = NULL;
|
||||
}
|
||||
else if (do_wait) {
|
||||
} else if (do_wait) {
|
||||
wait_for_completion(waiting);
|
||||
SRpnt->waiting = NULL;
|
||||
(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
|
||||
@ -533,28 +580,6 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
|
||||
return SRpnt;
|
||||
}
|
||||
|
||||
static int st_scsi_kern_execute(struct st_request *streq,
|
||||
const unsigned char *cmd, int data_direction,
|
||||
void *buffer, unsigned bufflen, int timeout,
|
||||
int retries)
|
||||
{
|
||||
struct scsi_tape *stp = streq->stp;
|
||||
int ret, resid;
|
||||
|
||||
stp->buffer->cmdstat.have_sense = 0;
|
||||
memcpy(streq->cmd, cmd, sizeof(streq->cmd));
|
||||
|
||||
ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
|
||||
streq->sense, timeout, retries, 0, &resid);
|
||||
if (driver_byte(ret) & DRIVER_ERROR)
|
||||
return -EBUSY;
|
||||
|
||||
stp->buffer->cmdstat.midlevel_result = streq->result = ret;
|
||||
stp->buffer->cmdstat.residual = resid;
|
||||
stp->buffer->syscall_result = st_chk_result(stp, streq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
|
||||
write has been correct but EOM early warning reached, -EIO if write ended in
|
||||
@ -627,7 +652,6 @@ static int cross_eof(struct scsi_tape * STp, int forward)
|
||||
{
|
||||
struct st_request *SRpnt;
|
||||
unsigned char cmd[MAX_COMMAND_SIZE];
|
||||
int ret;
|
||||
|
||||
cmd[0] = SPACE;
|
||||
cmd[1] = 0x01; /* Space FileMarks */
|
||||
@ -641,26 +665,20 @@ static int cross_eof(struct scsi_tape * STp, int forward)
|
||||
DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
|
||||
tape_name(STp), forward ? "forward" : "backward"));
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_RETRIES, 1);
|
||||
if (!SRpnt)
|
||||
return STp->buffer->syscall_result;
|
||||
return (STp->buffer)->syscall_result;
|
||||
|
||||
ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_RETRIES);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = STp->buffer->syscall_result;
|
||||
st_release_request(SRpnt);
|
||||
SRpnt = NULL;
|
||||
|
||||
if ((STp->buffer)->cmdstat.midlevel_result != 0)
|
||||
printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
|
||||
tape_name(STp), forward ? "forward" : "backward");
|
||||
|
||||
out:
|
||||
st_release_request(SRpnt);
|
||||
|
||||
return ret;
|
||||
return (STp->buffer)->syscall_result;
|
||||
}
|
||||
|
||||
|
||||
@ -881,24 +899,21 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
|
||||
int attentions, waits, max_wait, scode;
|
||||
int retval = CHKRES_READY, new_session = 0;
|
||||
unsigned char cmd[MAX_COMMAND_SIZE];
|
||||
struct st_request *SRpnt;
|
||||
struct st_request *SRpnt = NULL;
|
||||
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
if (!SRpnt)
|
||||
return STp->buffer->syscall_result;
|
||||
|
||||
max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
|
||||
|
||||
for (attentions=waits=0; ; ) {
|
||||
memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
|
||||
cmd[0] = TEST_UNIT_READY;
|
||||
SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
|
||||
STp->long_timeout, MAX_READY_RETRIES, 1);
|
||||
|
||||
retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
|
||||
STp->long_timeout,
|
||||
MAX_READY_RETRIES);
|
||||
if (retval)
|
||||
if (!SRpnt) {
|
||||
retval = (STp->buffer)->syscall_result;
|
||||
break;
|
||||
}
|
||||
|
||||
if (cmdstatp->have_sense) {
|
||||
|
||||
@ -942,8 +957,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
|
||||
break;
|
||||
}
|
||||
|
||||
st_release_request(SRpnt);
|
||||
|
||||
if (SRpnt != NULL)
|
||||
st_release_request(SRpnt);
|
||||
return retval;
|
||||
}
|
||||
|
||||
@ -1020,24 +1035,17 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
|
||||
}
|
||||
}
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
if (!SRpnt) {
|
||||
retval = STp->buffer->syscall_result;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (STp->omit_blklims)
|
||||
STp->min_block = STp->max_block = (-1);
|
||||
else {
|
||||
memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
|
||||
cmd[0] = READ_BLOCK_LIMITS;
|
||||
|
||||
retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
|
||||
STp->buffer->b_data, 6,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_READY_RETRIES);
|
||||
if (retval) {
|
||||
st_release_request(SRpnt);
|
||||
SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_READY_RETRIES, 1);
|
||||
if (!SRpnt) {
|
||||
retval = (STp->buffer)->syscall_result;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
@ -1061,12 +1069,11 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
|
||||
cmd[0] = MODE_SENSE;
|
||||
cmd[4] = 12;
|
||||
|
||||
retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
|
||||
STp->buffer->b_data, 12,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_READY_RETRIES);
|
||||
if (retval) {
|
||||
st_release_request(SRpnt);
|
||||
SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_READY_RETRIES, 1);
|
||||
if (!SRpnt) {
|
||||
retval = (STp->buffer)->syscall_result;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
@ -1296,17 +1303,11 @@ static int st_flush(struct file *filp, fl_owner_t id)
|
||||
cmd[0] = WRITE_FILEMARKS;
|
||||
cmd[4] = 1 + STp->two_fm;
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_WRITE_RETRIES, 1);
|
||||
if (!SRpnt) {
|
||||
result = STp->buffer->syscall_result;
|
||||
goto out;
|
||||
}
|
||||
|
||||
result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_WRITE_RETRIES);
|
||||
if (result) {
|
||||
st_release_request(SRpnt);
|
||||
result = (STp->buffer)->syscall_result;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1471,8 +1472,8 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
|
||||
|
||||
if (i && ((unsigned long)buf & queue_dma_alignment(
|
||||
STp->device->request_queue)) == 0) {
|
||||
i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg,
|
||||
(unsigned long)buf, count, (is_read ? READ : WRITE));
|
||||
i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
|
||||
count, (is_read ? READ : WRITE));
|
||||
if (i > 0) {
|
||||
STbp->do_dio = i;
|
||||
STbp->buffer_bytes = 0; /* can be used as transfer counter */
|
||||
@ -1480,7 +1481,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
|
||||
else
|
||||
STbp->do_dio = 0; /* fall back to buffering with any error */
|
||||
STbp->sg_segs = STbp->do_dio;
|
||||
STbp->frp_sg_current = 0;
|
||||
DEB(
|
||||
if (STbp->do_dio) {
|
||||
STp->nbr_dio++;
|
||||
@ -1526,7 +1526,7 @@ static void release_buffering(struct scsi_tape *STp, int is_read)
|
||||
|
||||
STbp = STp->buffer;
|
||||
if (STbp->do_dio) {
|
||||
sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read);
|
||||
sgl_unmap_user_pages(STbp, STbp->do_dio, is_read);
|
||||
STbp->do_dio = 0;
|
||||
STbp->sg_segs = 0;
|
||||
}
|
||||
@ -2372,7 +2372,6 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
|
||||
{
|
||||
unsigned char cmd[MAX_COMMAND_SIZE];
|
||||
struct st_request *SRpnt;
|
||||
int ret;
|
||||
|
||||
memset(cmd, 0, MAX_COMMAND_SIZE);
|
||||
cmd[0] = MODE_SENSE;
|
||||
@ -2381,17 +2380,14 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
|
||||
cmd[2] = page;
|
||||
cmd[4] = 255;
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
if (!SRpnt)
|
||||
return STp->buffer->syscall_result;
|
||||
SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE,
|
||||
STp->device->request_queue->rq_timeout, 0, 1);
|
||||
if (SRpnt == NULL)
|
||||
return (STp->buffer)->syscall_result;
|
||||
|
||||
ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
|
||||
STp->buffer->b_data, cmd[4],
|
||||
STp->device->request_queue->rq_timeout,
|
||||
MAX_RETRIES);
|
||||
st_release_request(SRpnt);
|
||||
|
||||
return ret ? : STp->buffer->syscall_result;
|
||||
return STp->buffer->syscall_result;
|
||||
}
|
||||
|
||||
|
||||
@ -2399,9 +2395,10 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
|
||||
in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
|
||||
static int write_mode_page(struct scsi_tape *STp, int page, int slow)
|
||||
{
|
||||
int pgo, timeout, ret = 0;
|
||||
int pgo;
|
||||
unsigned char cmd[MAX_COMMAND_SIZE];
|
||||
struct st_request *SRpnt;
|
||||
int timeout;
|
||||
|
||||
memset(cmd, 0, MAX_COMMAND_SIZE);
|
||||
cmd[0] = MODE_SELECT;
|
||||
@ -2415,21 +2412,16 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
|
||||
(STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
|
||||
(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
if (!SRpnt)
|
||||
return ret;
|
||||
|
||||
timeout = slow ? STp->long_timeout :
|
||||
STp->device->request_queue->rq_timeout;
|
||||
|
||||
ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
|
||||
STp->buffer->b_data, cmd[4], timeout, 0);
|
||||
if (!ret)
|
||||
ret = STp->buffer->syscall_result;
|
||||
timeout = slow ?
|
||||
STp->long_timeout : STp->device->request_queue->rq_timeout;
|
||||
SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE,
|
||||
timeout, 0, 1);
|
||||
if (SRpnt == NULL)
|
||||
return (STp->buffer)->syscall_result;
|
||||
|
||||
st_release_request(SRpnt);
|
||||
|
||||
return ret;
|
||||
return STp->buffer->syscall_result;
|
||||
}
|
||||
|
||||
|
||||
@ -2547,16 +2539,13 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
|
||||
printk(ST_DEB_MSG "%s: Loading tape.\n", name);
|
||||
);
|
||||
|
||||
SRpnt = st_allocate_request(STp);
|
||||
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
|
||||
timeout, MAX_RETRIES, 1);
|
||||
if (!SRpnt)
|
||||
return STp->buffer->syscall_result;
|
||||
|
||||
retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
|
||||
MAX_RETRIES);
|
||||
if (retval)
|
||||
goto out;
|
||||
return (STp->buffer)->syscall_result;
|
||||
|
||||
retval = (STp->buffer)->syscall_result;
|
||||
st_release_request(SRpnt);
|
||||
|
||||
if (!retval) { /* SCSI command successful */
|
||||
|
||||
@ -2575,8 +2564,6 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
|
||||
STps = &(STp->ps[STp->partition]);
|
||||
STps->drv_file = STps->drv_block = (-1);
|
||||
}
|
||||
out:
|
||||
st_release_request(SRpnt);
|
||||
|
||||
return retval;
|
||||
}
|
||||
@ -2852,15 +2839,12 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
        return (-ENOSYS);
    }

    SRpnt = st_allocate_request(STp);
    SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
                       timeout, MAX_RETRIES, 1);
    if (!SRpnt)
        return (STp->buffer)->syscall_result;

    ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
                                        STp->buffer->b_data, datalen,
                                        timeout, MAX_RETRIES);
    if (!ioctl_result)
        ioctl_result = (STp->buffer)->syscall_result;
    ioctl_result = (STp->buffer)->syscall_result;

    if (!ioctl_result) { /* SCSI command successful */
        st_release_request(SRpnt);
@ -3022,17 +3006,11 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
        if (!logical && !STp->scsi2_logical)
            scmd[1] = 1;
    }

    SRpnt = st_allocate_request(STp);
    SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
                       STp->device->request_queue->rq_timeout,
                       MAX_READY_RETRIES, 1);
    if (!SRpnt)
        return STp->buffer->syscall_result;

    result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
                                  STp->buffer->b_data, 20,
                                  STp->device->request_queue->rq_timeout,
                                  MAX_READY_RETRIES);
    if (result)
        goto out;
        return (STp->buffer)->syscall_result;

    if ((STp->buffer)->syscall_result != 0 ||
        (STp->device->scsi_level >= SCSI_2 &&
@ -3060,7 +3038,6 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
        DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
                    *block, *partition));
    }
out:
    st_release_request(SRpnt);
    SRpnt = NULL;

@ -3135,14 +3112,10 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
        timeout = STp->device->request_queue->rq_timeout;
    }

    SRpnt = st_allocate_request(STp);
    SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
                       timeout, MAX_READY_RETRIES, 1);
    if (!SRpnt)
        return STp->buffer->syscall_result;

    result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
                                  timeout, MAX_READY_RETRIES);
    if (result)
        goto out;
        return (STp->buffer)->syscall_result;

    STps->drv_block = STps->drv_file = (-1);
    STps->eof = ST_NOEOF;
@ -3167,7 +3140,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
        STps->drv_block = STps->drv_file = 0;
        result = 0;
    }
out:

    st_release_request(SRpnt);
    SRpnt = NULL;

@ -3696,38 +3669,34 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a

/* Try to allocate a new tape buffer. Calling function must not hold
   dev_arr_lock. */
static struct st_buffer *
 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
{
    int i, got = 0;
    gfp_t priority;
    struct st_buffer *tb;

    if (from_initialization)
        priority = GFP_ATOMIC;
    else
        priority = GFP_KERNEL;

    i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
        max_sg * sizeof(struct st_buf_fragment);
    tb = kzalloc(i, priority);
    tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
    if (!tb) {
        printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
        return NULL;
    }
    tb->frp_segs = tb->orig_frp_segs = 0;
    tb->frp_segs = 0;
    tb->use_sg = max_sg;
    tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);

    tb->dma = need_dma;
    tb->buffer_size = got;
    sg_init_table(tb->sg, max_sg);
    tb->buffer_size = 0;

    tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
                                 GFP_ATOMIC);
    if (!tb->reserved_pages) {
        kfree(tb);
        return NULL;
    }

    return tb;
}


/* Try to allocate enough space in the tape buffer */
#define ST_MAX_ORDER 6

static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
{
    int segs, nbr, max_segs, b_size, order, got;
@ -3747,33 +3716,45 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
    priority = GFP_KERNEL | __GFP_NOWARN;
    if (need_dma)
        priority |= GFP_DMA;
    for (b_size = PAGE_SIZE, order=0; order <= 6 &&
         b_size < new_size - STbuffer->buffer_size;
         order++, b_size *= 2)
        ; /* empty */

    if (STbuffer->cleared)
        priority |= __GFP_ZERO;

    if (STbuffer->frp_segs) {
        order = STbuffer->map_data.page_order;
        b_size = PAGE_SIZE << order;
    } else {
        for (b_size = PAGE_SIZE, order = 0;
             order < ST_MAX_ORDER && b_size < new_size;
             order++, b_size *= 2)
            ; /* empty */
    }
    if (max_segs * (PAGE_SIZE << order) < new_size) {
        if (order == ST_MAX_ORDER)
            return 0;
        normalize_buffer(STbuffer);
        return enlarge_buffer(STbuffer, new_size, need_dma);
    }

    for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
         segs < max_segs && got < new_size;) {
        STbuffer->frp[segs].page = alloc_pages(priority, order);
        if (STbuffer->frp[segs].page == NULL) {
            if (new_size - got <= (max_segs - segs) * b_size / 2) {
                b_size /= 2; /* Large enough for the rest of the buffers */
                order--;
                continue;
            }
        struct page *page;

        page = alloc_pages(priority, order);
        if (!page) {
            DEB(STbuffer->buffer_size = got);
            normalize_buffer(STbuffer);
            return 0;
        }
        STbuffer->frp[segs].length = b_size;

        STbuffer->frp_segs += 1;
        got += b_size;
        STbuffer->buffer_size = got;
        if (STbuffer->cleared)
            memset(page_address(STbuffer->frp[segs].page), 0, b_size);
        STbuffer->reserved_pages[segs] = page;
        segs++;
    }
    STbuffer->b_data = page_address(STbuffer->frp[0].page);
    STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
    STbuffer->map_data.page_order = order;

    return 1;
}
@ -3785,7 +3766,8 @@ static void clear_buffer(struct st_buffer * st_bp)
    int i;

    for (i=0; i < st_bp->frp_segs; i++)
        memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length);
        memset(page_address(st_bp->reserved_pages[i]), 0,
               PAGE_SIZE << st_bp->map_data.page_order);
    st_bp->cleared = 1;
}

@ -3793,16 +3775,16 @@ static void clear_buffer(struct st_buffer * st_bp)
/* Release the extra buffer */
static void normalize_buffer(struct st_buffer * STbuffer)
{
    int i, order;
    int i, order = STbuffer->map_data.page_order;

    for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) {
        order = get_order(STbuffer->frp[i].length);
        __free_pages(STbuffer->frp[i].page, order);
        STbuffer->buffer_size -= STbuffer->frp[i].length;
    for (i = 0; i < STbuffer->frp_segs; i++) {
        __free_pages(STbuffer->reserved_pages[i], order);
        STbuffer->buffer_size -= (PAGE_SIZE << order);
    }
    STbuffer->frp_segs = STbuffer->orig_frp_segs;
    STbuffer->frp_sg_current = 0;
    STbuffer->frp_segs = 0;
    STbuffer->sg_segs = 0;
    STbuffer->map_data.page_order = 0;
    STbuffer->map_data.offset = 0;
}

@ -3811,18 +3793,19 @@ static void normalize_buffer(struct st_buffer * STbuffer)
static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
{
    int i, cnt, res, offset;
    int length = PAGE_SIZE << st_bp->map_data.page_order;

    for (i = 0, offset = st_bp->buffer_bytes;
         i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
        offset -= st_bp->frp[i].length;
         i < st_bp->frp_segs && offset >= length; i++)
        offset -= length;
    if (i == st_bp->frp_segs) { /* Should never happen */
        printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
        return (-EIO);
    }
    for (; i < st_bp->frp_segs && do_count > 0; i++) {
        cnt = st_bp->frp[i].length - offset < do_count ?
            st_bp->frp[i].length - offset : do_count;
        res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt);
        struct page *page = st_bp->reserved_pages[i];
        cnt = length - offset < do_count ? length - offset : do_count;
        res = copy_from_user(page_address(page) + offset, ubp, cnt);
        if (res)
            return (-EFAULT);
        do_count -= cnt;
@ -3842,18 +3825,19 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
{
    int i, cnt, res, offset;
    int length = PAGE_SIZE << st_bp->map_data.page_order;

    for (i = 0, offset = st_bp->read_pointer;
         i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
        offset -= st_bp->frp[i].length;
         i < st_bp->frp_segs && offset >= length; i++)
        offset -= length;
    if (i == st_bp->frp_segs) { /* Should never happen */
        printk(KERN_WARNING "st: from_buffer offset overflow.\n");
        return (-EIO);
    }
    for (; i < st_bp->frp_segs && do_count > 0; i++) {
        cnt = st_bp->frp[i].length - offset < do_count ?
            st_bp->frp[i].length - offset : do_count;
        res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt);
        struct page *page = st_bp->reserved_pages[i];
        cnt = length - offset < do_count ? length - offset : do_count;
        res = copy_to_user(ubp, page_address(page) + offset, cnt);
        if (res)
            return (-EFAULT);
        do_count -= cnt;
@ -3874,6 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
{
    int src_seg, dst_seg, src_offset = 0, dst_offset;
    int count, total;
    int length = PAGE_SIZE << st_bp->map_data.page_order;

    if (offset == 0)
        return;
@ -3881,24 +3866,26 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
    total=st_bp->buffer_bytes - offset;
    for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
        src_offset = offset;
        if (src_offset < st_bp->frp[src_seg].length)
        if (src_offset < length)
            break;
        offset -= st_bp->frp[src_seg].length;
        offset -= length;
    }

    st_bp->buffer_bytes = st_bp->read_pointer = total;
    for (dst_seg=dst_offset=0; total > 0; ) {
        count = min(st_bp->frp[dst_seg].length - dst_offset,
                    st_bp->frp[src_seg].length - src_offset);
        memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset,
                page_address(st_bp->frp[src_seg].page) + src_offset, count);
        struct page *dpage = st_bp->reserved_pages[dst_seg];
        struct page *spage = st_bp->reserved_pages[src_seg];

        count = min(length - dst_offset, length - src_offset);
        memmove(page_address(dpage) + dst_offset,
                page_address(spage) + src_offset, count);
        src_offset += count;
        if (src_offset >= st_bp->frp[src_seg].length) {
        if (src_offset >= length) {
            src_seg++;
            src_offset = 0;
        }
        dst_offset += count;
        if (dst_offset >= st_bp->frp[dst_seg].length) {
        if (dst_offset >= length) {
            dst_seg++;
            dst_offset = 0;
        }
@ -3906,32 +3893,6 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
    }
}


/* Fill the s/g list up to the length required for this transfer */
static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
{
    int i;
    unsigned int count;
    struct scatterlist *sg;
    struct st_buf_fragment *frp;

    if (length == STbp->frp_sg_current)
        return; /* work already done */

    sg = &(STbp->sg[0]);
    frp = STbp->frp;
    for (i=count=0; count < length; i++) {
        if (length - count > frp[i].length)
            sg_set_page(&sg[i], frp[i].page, frp[i].length, 0);
        else
            sg_set_page(&sg[i], frp[i].page, length - count, 0);
        count += sg[i].length;
    }
    STbp->sg_segs = i;
    STbp->frp_sg_current = length;
}


/* Validate the options from command line or module parameters */
static void validate_options(void)
{
@ -4026,7 +3987,7 @@ static int st_probe(struct device *dev)
                SDp->request_queue->max_phys_segments);
    if (st_max_sg_segs < i)
        i = st_max_sg_segs;
    buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i);
    buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
    if (buffer == NULL) {
        printk(KERN_ERR
               "st: Can't allocate new tape buffer. Device not attached.\n");
@ -4280,8 +4241,8 @@ static void scsi_tape_release(struct kref *kref)
    tpnt->device = NULL;

    if (tpnt->buffer) {
        tpnt->buffer->orig_frp_segs = 0;
        normalize_buffer(tpnt->buffer);
        kfree(tpnt->buffer->reserved_pages);
        kfree(tpnt->buffer);
    }

@ -4567,14 +4528,16 @@ out:
}

/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
                              unsigned long uaddr, size_t count, int rw)
static int sgl_map_user_pages(struct st_buffer *STbp,
                              const unsigned int max_pages, unsigned long uaddr,
                              size_t count, int rw)
{
    unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    unsigned long start = uaddr >> PAGE_SHIFT;
    const int nr_pages = end - start;
    int res, i, j;
    struct page **pages;
    struct rq_map_data *mdata = &STbp->map_data;

    /* User attempted Overflow! */
    if ((uaddr + count) < uaddr)
@ -4616,24 +4579,11 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
        flush_dcache_page(pages[i]);
    }

    /* Populate the scatter/gather list */
    sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK);
    if (nr_pages > 1) {
        sgl[0].length = PAGE_SIZE - sgl[0].offset;
        count -= sgl[0].length;
        for (i=1; i < nr_pages ; i++) {
            sg_set_page(&sgl[i], pages[i],
                        count < PAGE_SIZE ? count : PAGE_SIZE, 0);;
            count -= PAGE_SIZE;
        }
    }
    else {
        sgl[0].length = count;
    }
    mdata->offset = uaddr & ~PAGE_MASK;
    mdata->page_order = 0;
    STbp->mapped_pages = pages;

    kfree(pages);
    return nr_pages;

 out_unmap:
    if (res > 0) {
        for (j=0; j < res; j++)
@ -4646,13 +4596,13 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa


/* And unmap them... */
static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
                                int dirtied)
static int sgl_unmap_user_pages(struct st_buffer *STbp,
                                const unsigned int nr_pages, int dirtied)
{
    int i;

    for (i=0; i < nr_pages; i++) {
        struct page *page = sg_page(&sgl[i]);
        struct page *page = STbp->mapped_pages[i];

        if (dirtied)
            SetPageDirty(page);
@ -4661,6 +4611,8 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
         */
        page_cache_release(page);
    }
    kfree(STbp->mapped_pages);
    STbp->mapped_pages = NULL;

    return 0;
}

@ -29,6 +29,7 @@ struct st_request {
    int result;
    struct scsi_tape *stp;
    struct completion *waiting;
    struct bio *bio;
};

/* The tape buffer descriptor. */
@ -44,20 +45,13 @@ struct st_buffer {
    int syscall_result;
    struct st_request *last_SRpnt;
    struct st_cmdstatus cmdstat;
    struct page **reserved_pages;
    struct page **mapped_pages;
    struct rq_map_data map_data;
    unsigned char *b_data;
    unsigned short use_sg;       /* zero or max number of s/g segments for this adapter */
    unsigned short sg_segs;      /* number of segments in s/g list */
    unsigned short orig_frp_segs;    /* number of segments allocated at first try */
    unsigned short frp_segs;     /* number of buffer segments */
    unsigned int frp_sg_current; /* driver buffer length currently in s/g list */
    struct st_buf_fragment *frp; /* the allocated buffer fragment list */
    struct scatterlist sg[1];    /* MUST BE last item */
};

/* The tape buffer fragment descriptor */
struct st_buf_fragment {
    struct page *page;
    unsigned int length;
};

/* The tape mode definition */
@ -137,8 +137,8 @@ zalon_probe(struct parisc_device *dev)
        goto fail;

    if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
        printk(KERN_ERR "%s: irq problem with %d, detaching\n ",
               dev->dev.bus_id, dev->irq);
        dev_printk(KERN_ERR, dev, "irq problem with %d, detaching\n ",
                   dev->irq);
        goto fail;
    }

fs/bio.c
@ -788,6 +788,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
    int i, ret;
    int nr_pages = 0;
    unsigned int len = 0;
    unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

    for (i = 0; i < iov_count; i++) {
        unsigned long uaddr;
@ -814,35 +815,42 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
    bio->bi_rw |= (!write_to_vm << BIO_RW);

    ret = 0;
    i = 0;
    while (len) {
        unsigned int bytes;

        if (map_data)
            bytes = 1U << (PAGE_SHIFT + map_data->page_order);
        else
            bytes = PAGE_SIZE;
    if (map_data) {
        nr_pages = 1 << map_data->page_order;
        i = map_data->offset / PAGE_SIZE;
    }
    while (len) {
        unsigned int bytes = PAGE_SIZE;

        bytes -= offset;

        if (bytes > len)
            bytes = len;

        if (map_data) {
            if (i == map_data->nr_entries) {
            if (i == map_data->nr_entries * nr_pages) {
                ret = -ENOMEM;
                break;
            }
            page = map_data->pages[i++];
        } else

            page = map_data->pages[i / nr_pages];
            page += (i % nr_pages);

            i++;
        } else {
            page = alloc_page(q->bounce_gfp | gfp_mask);
            if (!page) {
                ret = -ENOMEM;
                break;
            if (!page) {
                ret = -ENOMEM;
                break;
            }
        }

        if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
        if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
            break;

        len -= bytes;
        offset = 0;
    }

    if (ret)
@ -851,7 +859,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
    /*
     * success
     */
    if (!write_to_vm) {
    if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
        ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
        if (ret)
            goto cleanup;

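For readers following the bio_copy_user_iov() hunk above: when map_data supplies pre-allocated higher-order entries, the copy loop still advances one PAGE_SIZE step at a time, so the running index i has to be split into an entry in map_data->pages plus a page inside that allocation. A minimal sketch of that arithmetic (illustrative only, not taken from the patch; the helper name is made up):

/*
 * Sketch: pick the small page for step i when each entry in
 * map_data->pages covers (1 << page_order) contiguous pages.
 * Mirrors the i / nr_pages and i % nr_pages logic in the hunk above.
 */
static struct page *example_pick_copy_page(struct rq_map_data *map_data, int i)
{
	int nr_pages = 1 << map_data->page_order;	/* pages per entry */

	if (i >= map_data->nr_entries * nr_pages)
		return NULL;				/* ran out of supplied pages */

	/* which allocation, then which page within it */
	return map_data->pages[i / nr_pages] + (i % nr_pages);
}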
@ -690,6 +690,8 @@ struct rq_map_data {
    struct page **pages;
    int page_order;
    int nr_entries;
    unsigned long offset;
    int null_mapped;
};

struct req_iterator {
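The blkdev.h hunk above adds the offset and null_mapped fields to struct rq_map_data. As a rough sketch of how a driver such as st can use them, assuming the blk_rq_map_user() entry point that takes a struct rq_map_data * in this kernel version (the helper name and parameters below are invented for the example, not part of the patch):

#include <linux/blkdev.h>

/*
 * Illustrative sketch only: describe a driver-owned, pre-allocated buffer
 * to the block layer so the data is copied through it instead of through
 * freshly allocated bounce pages.
 */
static int example_map_reserved_buffer(struct request_queue *q,
				       struct request *rq,
				       struct page **reserved_pages,
				       int nr_entries, int page_order,
				       void __user *ubuf, unsigned long len)
{
	struct rq_map_data mdata;

	mdata.pages = reserved_pages;	/* driver's reserved pages */
	mdata.page_order = page_order;	/* each entry is a 2^order allocation */
	mdata.nr_entries = nr_entries;
	mdata.offset = 0;		/* start at the beginning of the buffer */
	mdata.null_mapped = !ubuf;	/* no user buffer: skip the copy-in */

	/* The block layer copies through mdata.pages rather than its own. */
	return blk_rq_map_user(q, rq, &mdata, ubuf, len, GFP_KERNEL);
}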
@ -358,6 +358,7 @@ struct fc_rport { /* aka fc_starget_attrs */
#define FC_RPORT_DEVLOSS_PENDING	0x01
#define FC_RPORT_SCAN_PENDING		0x02
#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04
#define FC_RPORT_DEVLOSS_CALLBK_DONE	0x08

#define dev_to_rport(d) \
	container_of(d, struct fc_rport, dev)