scsi: smartpqi: add suspend and resume support
Add support for ACPI S3 (suspend) and S4 (hibernate) system power states.

Reviewed-by: Scott Benesh <scott.benesh@microsemi.com>
Signed-off-by: Kevin Barnett <kevin.barnett@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

parent 7561a7e441
commit 061ef06a2d
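
For orientation before the full diff: judging by the hunk contexts, the changes span the driver's smartpqi.h, smartpqi_init.c, smartpqi_sis.c, and smartpqi_sis.h. The sketch below is a condensed illustration of how the new power-management entry points are wired up, not the complete patch; all identifiers are taken from the patch itself, but the function bodies are abridged. The real pqi_suspend() also quiesces the controller (disables events, cancels the rescan and update-time workers, flushes the cache, and waits for outstanding I/O), and the real pqi_resume() re-runs controller initialization via pqi_ctrl_init_resume().

/* Condensed sketch of the CONFIG_PM wiring added by this patch. */
#include <linux/pci.h>

#if defined(CONFIG_PM)
static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
        /* ... quiesce the controller (see the full pqi_suspend in the diff) ... */

        /* Hibernate's freeze phase keeps the device powered. */
        if (state.event == PM_EVENT_FREEZE)
                return 0;

        pci_save_state(pci_dev);
        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

        return 0;
}

static int pqi_resume(struct pci_dev *pci_dev)
{
        pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);

        /* ... re-initialize the controller (pqi_ctrl_init_resume in the diff) ... */

        return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver pqi_pci_driver = {
        /* .name, .id_table, .probe, .remove, .shutdown as in the driver */
#if defined(CONFIG_PM)
        .suspend = pqi_suspend,
        .resume = pqi_resume,
#endif
};

The full diff follows.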
@@ -61,7 +61,7 @@ struct pqi_device_registers {
 /*
  * controller registers
  *
- * These are defined by the PMC implementation.
+ * These are defined by the Microsemi implementation.
  *
  * Some registers (those named sis_*) are only used when in
  * legacy SIS mode before we transition the controller into
@@ -102,6 +102,12 @@ enum pqi_io_path {
         AIO_PATH = 1
 };
 
+enum pqi_irq_mode {
+        IRQ_MODE_NONE,
+        IRQ_MODE_INTX,
+        IRQ_MODE_MSIX
+};
+
 struct pqi_sg_descriptor {
         __le64 address;
         __le32 length;
@@ -908,7 +914,7 @@ struct pqi_ctrl_info {
         dma_addr_t error_buffer_dma_handle;
         size_t sg_chain_buffer_length;
         unsigned int num_queue_groups;
-        unsigned int num_active_queue_groups;
+        u16 max_hw_queue_index;
         u16 num_elements_per_iq;
         u16 num_elements_per_oq;
         u16 max_inbound_iu_length_per_firmware;
@@ -923,6 +929,7 @@ struct pqi_ctrl_info {
         struct pqi_admin_queues admin_queues;
         struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
         struct pqi_event_queue event_queue;
+        enum pqi_irq_mode irq_mode;
         int max_msix_vectors;
         int num_msix_vectors_enabled;
         int num_msix_vectors_initialized;
@@ -937,6 +944,7 @@ struct pqi_ctrl_info {
         u8 outbound_spanning_supported : 1;
         u8 pqi_mode_enabled : 1;
         u8 heartbeat_timer_started : 1;
+        u8 update_time_worker_scheduled : 1;
 
         struct list_head scsi_device_list;
         spinlock_t scsi_device_list_lock;
@@ -262,6 +262,11 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
                 PQI_RESCAN_WORK_INTERVAL);
 }
 
+static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+        cancel_delayed_work_sync(&ctrl_info->rescan_work);
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
         size_t buffer_length, int data_direction)
@@ -588,7 +593,7 @@ static int pqi_write_driver_version_to_host_wellness(
         buffer->driver_version_tag[1] = 'V';
         put_unaligned_le16(sizeof(buffer->driver_version),
                 &buffer->driver_version_length);
-        strncpy(buffer->driver_version, DRIVER_VERSION,
+        strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
                 sizeof(buffer->driver_version) - 1);
         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
         buffer->end_tag[0] = 'Z';
@@ -686,7 +691,21 @@ static void pqi_update_time_worker(struct work_struct *work)
 static inline void pqi_schedule_update_time_worker(
         struct pqi_ctrl_info *ctrl_info)
 {
+        if (ctrl_info->update_time_worker_scheduled)
+                return;
+
         schedule_delayed_work(&ctrl_info->update_time_work, 0);
+        ctrl_info->update_time_worker_scheduled = true;
+}
+
+static inline void pqi_cancel_update_time_worker(
+        struct pqi_ctrl_info *ctrl_info)
+{
+        if (!ctrl_info->update_time_worker_scheduled)
+                return;
+
+        cancel_delayed_work_sync(&ctrl_info->update_time_work);
+        ctrl_info->update_time_worker_scheduled = false;
 }
 
 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1967,6 +1986,18 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
         return !mutex_is_locked(&ctrl_info->scan_mutex);
 }
 
+static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
+{
+        mutex_lock(&ctrl_info->scan_mutex);
+        mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
+{
+        mutex_lock(&ctrl_info->lun_reset_mutex);
+        mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
 static inline void pqi_set_encryption_info(
         struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
         u64 first_block)
@@ -2825,6 +2856,9 @@ static void pqi_heartbeat_timer_handler(unsigned long data)
         int num_interrupts;
         struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
 
+        if (!ctrl_info->heartbeat_timer_started)
+                return;
+
         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
 
         if (num_interrupts == ctrl_info->previous_num_interrupts) {
@@ -2855,14 +2889,16 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
         ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
         ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
-        add_timer(&ctrl_info->heartbeat_timer);
         ctrl_info->heartbeat_timer_started = true;
+        add_timer(&ctrl_info->heartbeat_timer);
 }
 
 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
 {
-        if (ctrl_info->heartbeat_timer_started)
+        if (ctrl_info->heartbeat_timer_started) {
+                ctrl_info->heartbeat_timer_started = false;
                 del_timer_sync(&ctrl_info->heartbeat_timer);
+        }
 }
 
 static inline int pqi_event_type_to_event_index(unsigned int event_type)
@@ -2938,6 +2974,106 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
         return num_events;
 }
 
+#define PQI_LEGACY_INTX_MASK 0x1
+
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
+        bool enable_intx)
+{
+        u32 intx_mask;
+        struct pqi_device_registers __iomem *pqi_registers;
+        volatile void __iomem *register_addr;
+
+        pqi_registers = ctrl_info->pqi_registers;
+
+        if (enable_intx)
+                register_addr = &pqi_registers->legacy_intx_mask_clear;
+        else
+                register_addr = &pqi_registers->legacy_intx_mask_set;
+
+        intx_mask = readl(register_addr);
+        intx_mask |= PQI_LEGACY_INTX_MASK;
+        writel(intx_mask, register_addr);
+}
+
+static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
+        enum pqi_irq_mode new_mode)
+{
+        switch (ctrl_info->irq_mode) {
+        case IRQ_MODE_MSIX:
+                switch (new_mode) {
+                case IRQ_MODE_MSIX:
+                        break;
+                case IRQ_MODE_INTX:
+                        pqi_configure_legacy_intx(ctrl_info, true);
+                        sis_disable_msix(ctrl_info);
+                        sis_enable_intx(ctrl_info);
+                        break;
+                case IRQ_MODE_NONE:
+                        sis_disable_msix(ctrl_info);
+                        break;
+                }
+                break;
+        case IRQ_MODE_INTX:
+                switch (new_mode) {
+                case IRQ_MODE_MSIX:
+                        pqi_configure_legacy_intx(ctrl_info, false);
+                        sis_disable_intx(ctrl_info);
+                        sis_enable_msix(ctrl_info);
+                        break;
+                case IRQ_MODE_INTX:
+                        break;
+                case IRQ_MODE_NONE:
+                        pqi_configure_legacy_intx(ctrl_info, false);
+                        sis_disable_intx(ctrl_info);
+                        break;
+                }
+                break;
+        case IRQ_MODE_NONE:
+                switch (new_mode) {
+                case IRQ_MODE_MSIX:
+                        sis_enable_msix(ctrl_info);
+                        break;
+                case IRQ_MODE_INTX:
+                        pqi_configure_legacy_intx(ctrl_info, true);
+                        sis_enable_intx(ctrl_info);
+                        break;
+                case IRQ_MODE_NONE:
+                        break;
+                }
+                break;
+        }
+
+        ctrl_info->irq_mode = new_mode;
+}
+
+#define PQI_LEGACY_INTX_PENDING 0x1
+
+static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
+{
+        bool valid_irq;
+        u32 intx_status;
+
+        switch (ctrl_info->irq_mode) {
+        case IRQ_MODE_MSIX:
+                valid_irq = true;
+                break;
+        case IRQ_MODE_INTX:
+                intx_status =
+                        readl(&ctrl_info->pqi_registers->legacy_intx_status);
+                if (intx_status & PQI_LEGACY_INTX_PENDING)
+                        valid_irq = true;
+                else
+                        valid_irq = false;
+                break;
+        case IRQ_MODE_NONE:
+        default:
+                valid_irq = false;
+                break;
+        }
+
+        return valid_irq;
+}
+
 static irqreturn_t pqi_irq_handler(int irq, void *data)
 {
         struct pqi_ctrl_info *ctrl_info;
@@ -2947,7 +3083,7 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
         queue_group = data;
         ctrl_info = queue_group->ctrl_info;
 
-        if (!ctrl_info || !queue_group->oq_ci)
+        if (!pqi_is_valid_irq(ctrl_info))
                 return IRQ_NONE;
 
         num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
@@ -3013,7 +3149,7 @@ static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
         }
 
         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
-
+        ctrl_info->irq_mode = IRQ_MODE_MSIX;
         return 0;
 }
 
@@ -3798,16 +3934,15 @@ static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
         return 0;
 }
 
-static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
+        unsigned int group_number)
 {
-        unsigned int i;
         int rc;
         struct pqi_queue_group *queue_group;
         struct pqi_general_admin_request request;
         struct pqi_general_admin_response response;
 
-        i = ctrl_info->num_active_queue_groups;
-        queue_group = &ctrl_info->queue_groups[i];
+        queue_group = &ctrl_info->queue_groups[group_number];
 
         /*
          * Create IQ (Inbound Queue - host to device queue) for
@@ -3937,8 +4072,6 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
                 get_unaligned_le64(
                         &response.data.create_operational_oq.oq_ci_offset);
 
-        ctrl_info->num_active_queue_groups++;
-
         return 0;
 
 delete_inbound_queue_aio:
@@ -3965,7 +4098,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
         }
 
         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
-                rc = pqi_create_queue_group(ctrl_info);
+                rc = pqi_create_queue_group(ctrl_info, i);
                 if (rc) {
                         dev_err(&ctrl_info->pci_dev->dev,
                                 "error creating queue group number %u/%u\n",
@@ -4219,6 +4352,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
         num_queue_groups = min(num_queue_groups, max_queue_groups);
 
         ctrl_info->num_queue_groups = num_queue_groups;
+        ctrl_info->max_hw_queue_index = num_queue_groups - 1;
 
         /*
          * Make sure that the max. inbound IU length is an even multiple
@@ -4591,6 +4725,18 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
         return 0;
 }
 
+static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
+        struct scsi_cmnd *scmd)
+{
+        u16 hw_queue;
+
+        hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+        if (hw_queue > ctrl_info->max_hw_queue_index)
+                hw_queue = 0;
+
+        return hw_queue;
+}
+
 /*
  * This function gets called just before we hand the completed SCSI request
  * back to the SML.
@@ -4610,7 +4756,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
         int rc;
         struct pqi_ctrl_info *ctrl_info;
         struct pqi_scsi_dev *device;
-        u16 hwq;
+        u16 hw_queue;
         struct pqi_queue_group *queue_group;
         bool raid_bypassed;
 
@@ -4637,11 +4783,8 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
          */
         scmd->result = 0;
 
-        hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
-        if (hwq >= ctrl_info->num_queue_groups)
-                hwq = 0;
-
-        queue_group = &ctrl_info->queue_groups[hwq];
+        hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
+        queue_group = &ctrl_info->queue_groups[hw_queue];
 
         if (pqi_is_logical_device(device)) {
                 raid_bypassed = false;
@@ -4777,6 +4920,52 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
         }
 }
 
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+        struct pqi_scsi_dev *device)
+{
+        while (atomic_read(&device->scsi_cmds_outstanding)) {
+                pqi_check_ctrl_health(ctrl_info);
+                if (pqi_ctrl_offline(ctrl_info))
+                        return -ENXIO;
+                usleep_range(1000, 2000);
+        }
+
+        return 0;
+}
+
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+{
+        bool io_pending;
+        unsigned long flags;
+        struct pqi_scsi_dev *device;
+
+        while (1) {
+                io_pending = false;
+
+                spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+                list_for_each_entry(device, &ctrl_info->scsi_device_list,
+                        scsi_device_list_entry) {
+                        if (atomic_read(&device->scsi_cmds_outstanding)) {
+                                io_pending = true;
+                                break;
+                        }
+                }
+                spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+                        flags);
+
+                if (!io_pending)
+                        break;
+
+                pqi_check_ctrl_health(ctrl_info);
+                if (pqi_ctrl_offline(ctrl_info))
+                        return -ENXIO;
+
+                usleep_range(1000, 2000);
+        }
+
+        return 0;
+}
+
 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
         void *context)
 {
@@ -4853,6 +5042,8 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
         int rc;
 
         rc = pqi_lun_reset(ctrl_info, device);
+        if (rc == 0)
+                rc = pqi_device_wait_for_pending_io(ctrl_info, device);
 
         return rc == 0 ? SUCCESS : FAILED;
 }
@@ -5487,7 +5678,7 @@ static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
 {
         int rc;
 
-        sis_disable_msix(ctrl_info);
+        pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
         rc = pqi_reset(ctrl_info);
         if (rc)
                 return rc;
@@ -5647,7 +5838,10 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
         if (rc)
                 return rc;
 
-        sis_enable_msix(ctrl_info);
+        pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+        ctrl_info->controller_online = true;
+        pqi_start_heartbeat_timer(ctrl_info);
 
         rc = pqi_enable_events(ctrl_info);
         if (rc) {
@@ -5656,10 +5850,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
                 return rc;
         }
 
-        pqi_start_heartbeat_timer(ctrl_info);
-
-        ctrl_info->controller_online = true;
-
         /* Register with the SCSI subsystem. */
         rc = pqi_register_scsi(ctrl_info);
         if (rc)
@@ -5686,6 +5876,116 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
         return 0;
 }
 
+#if defined(CONFIG_PM)
+
+static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
+{
+        unsigned int i;
+        struct pqi_admin_queues *admin_queues;
+        struct pqi_event_queue *event_queue;
+
+        admin_queues = &ctrl_info->admin_queues;
+        admin_queues->iq_pi_copy = 0;
+        admin_queues->oq_ci_copy = 0;
+        *admin_queues->oq_pi = 0;
+
+        for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+                ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
+                ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
+                ctrl_info->queue_groups[i].oq_ci_copy = 0;
+
+                *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
+                *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
+                *ctrl_info->queue_groups[i].oq_pi = 0;
+        }
+
+        event_queue = &ctrl_info->event_queue;
+        *event_queue->oq_pi = 0;
+        event_queue->oq_ci_copy = 0;
+}
+
+static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
+{
+        int rc;
+
+        rc = pqi_force_sis_mode(ctrl_info);
+        if (rc)
+                return rc;
+
+        /*
+         * Wait until the controller is ready to start accepting SIS
+         * commands.
+         */
+        rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
+        if (rc)
+                return rc;
+
+        /*
+         * If the function we are about to call succeeds, the
+         * controller will transition from legacy SIS mode
+         * into PQI mode.
+         */
+        rc = sis_init_base_struct_addr(ctrl_info);
+        if (rc) {
+                dev_err(&ctrl_info->pci_dev->dev,
+                        "error initializing PQI mode\n");
+                return rc;
+        }
+
+        /* Wait for the controller to complete the SIS -> PQI transition. */
+        rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+        if (rc) {
+                dev_err(&ctrl_info->pci_dev->dev,
+                        "transition to PQI mode failed\n");
+                return rc;
+        }
+
+        /* From here on, we are running in PQI mode. */
+        ctrl_info->pqi_mode_enabled = true;
+        pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+        pqi_reinit_queues(ctrl_info);
+
+        rc = pqi_create_admin_queues(ctrl_info);
+        if (rc) {
+                dev_err(&ctrl_info->pci_dev->dev,
+                        "error creating admin queues\n");
+                return rc;
+        }
+
+        rc = pqi_create_queues(ctrl_info);
+        if (rc)
+                return rc;
+
+        pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+        ctrl_info->controller_online = true;
+        pqi_start_heartbeat_timer(ctrl_info);
+        pqi_ctrl_unblock_requests(ctrl_info);
+
+        rc = pqi_enable_events(ctrl_info);
+        if (rc) {
+                dev_err(&ctrl_info->pci_dev->dev,
+                        "error configuring events\n");
+                return rc;
+        }
+
+        rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+        if (rc) {
+                dev_err(&ctrl_info->pci_dev->dev,
+                        "error updating host wellness\n");
+                return rc;
+        }
+
+        pqi_schedule_update_time_worker(ctrl_info);
+
+        pqi_scan_scsi_devices(ctrl_info);
+
+        return 0;
+}
+
+#endif /* CONFIG_PM */
+
 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
         u16 timeout)
 {
@@ -5796,6 +6096,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
         init_waitqueue_head(&ctrl_info->block_requests_wait);
 
         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+        ctrl_info->irq_mode = IRQ_MODE_NONE;
         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
 
         return ctrl_info;
@@ -5839,8 +6140,8 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
 
 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 {
-        cancel_delayed_work_sync(&ctrl_info->rescan_work);
-        cancel_delayed_work_sync(&ctrl_info->update_time_work);
+        pqi_cancel_rescan_worker(ctrl_info);
+        pqi_cancel_update_time_worker(ctrl_info);
         pqi_remove_all_scsi_devices(ctrl_info);
         pqi_unregister_scsi(ctrl_info);
         if (ctrl_info->pqi_mode_enabled)
@@ -5952,6 +6253,71 @@ error:
                 "unable to flush controller cache\n");
 }
 
+#if defined(CONFIG_PM)
+
+static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+        struct pqi_ctrl_info *ctrl_info;
+
+        ctrl_info = pci_get_drvdata(pci_dev);
+
+        pqi_disable_events(ctrl_info);
+        pqi_cancel_update_time_worker(ctrl_info);
+        pqi_cancel_rescan_worker(ctrl_info);
+        pqi_wait_until_scan_finished(ctrl_info);
+        pqi_wait_until_lun_reset_finished(ctrl_info);
+        pqi_flush_cache(ctrl_info);
+        pqi_ctrl_block_requests(ctrl_info);
+        pqi_ctrl_wait_until_quiesced(ctrl_info);
+        pqi_wait_until_inbound_queues_empty(ctrl_info);
+        pqi_ctrl_wait_for_pending_io(ctrl_info);
+        pqi_stop_heartbeat_timer(ctrl_info);
+
+        if (state.event == PM_EVENT_FREEZE)
+                return 0;
+
+        pci_save_state(pci_dev);
+        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+        ctrl_info->controller_online = false;
+        ctrl_info->pqi_mode_enabled = false;
+
+        return 0;
+}
+
+static int pqi_resume(struct pci_dev *pci_dev)
+{
+        int rc;
+        struct pqi_ctrl_info *ctrl_info;
+
+        ctrl_info = pci_get_drvdata(pci_dev);
+
+        if (pci_dev->current_state != PCI_D0) {
+                ctrl_info->max_hw_queue_index = 0;
+                pqi_free_interrupts(ctrl_info);
+                pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
+                rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
+                        IRQF_SHARED, DRIVER_NAME_SHORT,
+                        &ctrl_info->queue_groups[0]);
+                if (rc) {
+                        dev_err(&ctrl_info->pci_dev->dev,
+                                "irq %u init failed with error %d\n",
+                                pci_dev->irq, rc);
+                        return rc;
+                }
+                pqi_start_heartbeat_timer(ctrl_info);
+                pqi_ctrl_unblock_requests(ctrl_info);
+                return 0;
+        }
+
+        pci_set_power_state(pci_dev, PCI_D0);
+        pci_restore_state(pci_dev);
+
+        return pqi_ctrl_init_resume(ctrl_info);
+}
+
+#endif /* CONFIG_PM */
+
 /* Define the PCI IDs for the controllers that we support. */
 static const struct pci_device_id pqi_pci_id_table[] = {
         {
@@ -6093,6 +6459,10 @@ static struct pci_driver pqi_pci_driver = {
         .probe = pqi_pci_probe,
         .remove = pqi_pci_remove,
         .shutdown = pqi_shutdown,
+#if defined(CONFIG_PM)
+        .suspend = pqi_suspend,
+        .resume = pqi_resume,
+#endif
 };
 
 static int __init pqi_init(void)
@@ -6458,6 +6828,9 @@ static void __attribute__((unused)) verify_structures(void)
         BUILD_BUG_ON(offsetof(struct pqi_event_config,
                 descriptors) != 4);
 
+        BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
+                ARRAY_SIZE(pqi_supported_event_types));
+
         BUILD_BUG_ON(offsetof(struct pqi_event_response,
                 header.iu_type) != 0);
         BUILD_BUG_ON(offsetof(struct pqi_event_response,
@@ -33,6 +33,7 @@
 /* for submission of legacy SIS commands */
 #define SIS_REENABLE_SIS_MODE 0x1
 #define SIS_ENABLE_MSIX 0x40
+#define SIS_ENABLE_INTX 0x80
 #define SIS_SOFT_RESET 0x100
 #define SIS_TRIGGER_SHUTDOWN 0x800000
 #define SIS_CMD_READY 0x200
@@ -56,6 +57,7 @@
 #define SIS_CTRL_KERNEL_UP 0x80
 #define SIS_CTRL_KERNEL_PANIC 0x100
 #define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
 
 #pragma pack(1)
@@ -79,12 +81,13 @@ struct sis_base_struct {
 
 #pragma pack()
 
-int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
+        unsigned int timeout_secs)
 {
         unsigned long timeout;
         u32 status;
 
-        timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+        timeout = (timeout_secs * HZ) + jiffies;
 
         while (1) {
                 status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -107,6 +110,18 @@ int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
         return 0;
 }
 
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+        return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+                SIS_CTRL_READY_TIMEOUT_SECS);
+}
+
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
+{
+        return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+                SIS_CTRL_READY_RESUME_TIMEOUT_SECS);
+}
+
 bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
 {
         bool running;
@@ -315,6 +330,34 @@ out:
         return rc;
 }
 
+#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30
+
+static void sis_wait_for_doorbell_bit_to_clear(
+        struct pqi_ctrl_info *ctrl_info, u32 bit)
+{
+        u32 doorbell_register;
+        unsigned long timeout;
+
+        timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+
+        while (1) {
+                doorbell_register =
+                        readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+                if ((doorbell_register & bit) == 0)
+                        break;
+                if (readl(&ctrl_info->registers->sis_firmware_status) &
+                        SIS_CTRL_KERNEL_PANIC)
+                        break;
+                if (time_after(jiffies, timeout)) {
+                        dev_err(&ctrl_info->pci_dev->dev,
+                                "doorbell register bit 0x%x not cleared\n",
+                                bit);
+                        break;
+                }
+                usleep_range(1000, 2000);
+        }
+}
+
 /* Enable MSI-X interrupts on the controller. */
 
 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
@@ -327,6 +370,8 @@ void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
 
         writel(doorbell_register,
                 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+        sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
 }
 
 /* Disable MSI-X interrupts on the controller. */
@@ -343,6 +388,32 @@ void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
                 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
 }
 
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+        u32 doorbell_register;
+
+        doorbell_register =
+                readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+        doorbell_register |= SIS_ENABLE_INTX;
+
+        writel(doorbell_register,
+                &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+        sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+}
+
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+        u32 doorbell_register;
+
+        doorbell_register =
+                readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+        doorbell_register &= ~SIS_ENABLE_INTX;
+
+        writel(doorbell_register,
+                &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
 {
         writel(SIS_SOFT_RESET,
@@ -20,6 +20,7 @@
 #define _SMARTPQI_SIS_H
 
 int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
 bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
 bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info);
 int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
@@ -27,6 +28,8 @@ int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
 int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
 void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info);
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);