2006-04-02 17:51:53 +08:00
|
|
|
/*
|
|
|
|
* libata-eh.c - libata error handling
|
|
|
|
*
|
|
|
|
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
|
|
|
|
* Please ALWAYS copy linux-ide@vger.kernel.org
|
|
|
|
* on emails.
|
|
|
|
*
|
|
|
|
* Copyright 2006 Tejun Heo <htejun@gmail.com>
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License as
|
|
|
|
* published by the Free Software Foundation; either version 2, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; see the file COPYING. If not, write to
|
|
|
|
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
|
|
|
|
* USA.
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* libata documentation is available via 'make {ps|pdf}docs',
|
|
|
|
* as Documentation/DocBook/libata.*
|
|
|
|
*
|
|
|
|
* Hardware documentation available from http://www.t13.org/ and
|
|
|
|
* http://www.sata-io.org/
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <scsi/scsi.h>
|
|
|
|
#include <scsi/scsi_host.h>
|
|
|
|
#include <scsi/scsi_eh.h>
|
|
|
|
#include <scsi/scsi_device.h>
|
|
|
|
#include <scsi/scsi_cmnd.h>
|
2006-08-10 19:31:37 +08:00
|
|
|
#include "../scsi/scsi_transport_api.h"
|
2006-04-02 17:51:53 +08:00
|
|
|
|
|
|
|
#include <linux/libata.h>
|
|
|
|
|
|
|
|
#include "libata.h"
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* Verdict flags for the EH speed-down logic: which remedy to apply
 * after repeated errors on a device (see the speed-down rules in the
 * EH action code).
 */
enum {
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),	/* turn off NCQ */
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),	/* lower transfer mode one grade */
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),	/* force PATA device into PIO */
};
|
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
/* Waiting in ->prereset can never be reliable.  It's sometimes nice
 * to wait there but it can't be depended upon; otherwise, we wouldn't
 * be resetting.  Just give it enough time for most drives to spin up.
 */
enum {
	ATA_EH_PRERESET_TIMEOUT = 10 * HZ,	/* max wait before reset proceeds */
};
|
|
|
|
|
|
|
|
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10 * HZ,	/* most drives spin up by 10sec */
	10 * HZ,	/* > 99% working drives spin up before 20sec */
	35 * HZ,	/* give > 30 secs of idleness for retarded devices */
	5 * HZ,		/* and sweet one last chance */
	/* > 1 min has elapsed, give up */
};
|
|
|
|
|
2006-05-15 19:58:12 +08:00
|
|
|
/* forward declarations for helpers defined later in this file */
static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);

#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
static int ata_eh_suspend(struct ata_port *ap,
			  struct ata_device **r_failed_dev);
static void ata_eh_prep_resume(struct ata_port *ap);
static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev);
#else /* CONFIG_PM */
/* Without power management the suspend/resume hooks degenerate to
 * no-ops; the int-returning variants report success unconditionally.
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }

static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	return 0;
}

static void ata_eh_prep_resume(struct ata_port *ap)
{ }

static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	return 0;
}
#endif /* CONFIG_PM */
|
2006-05-15 19:58:12 +08:00
|
|
|
|
2006-05-15 19:58:19 +08:00
|
|
|
static void ata_ering_record(struct ata_ering *ering, int is_io,
|
|
|
|
unsigned int err_mask)
|
|
|
|
{
|
|
|
|
struct ata_ering_entry *ent;
|
|
|
|
|
|
|
|
WARN_ON(!err_mask);
|
|
|
|
|
|
|
|
ering->cursor++;
|
|
|
|
ering->cursor %= ATA_ERING_SIZE;
|
|
|
|
|
|
|
|
ent = &ering->ring[ering->cursor];
|
|
|
|
ent->is_io = is_io;
|
|
|
|
ent->err_mask = err_mask;
|
|
|
|
ent->timestamp = get_jiffies_64();
|
|
|
|
}
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
static void ata_ering_clear(struct ata_ering *ering)
|
2006-05-15 19:58:19 +08:00
|
|
|
{
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
memset(ering, 0, sizeof(*ering));
|
2006-05-15 19:58:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ata_ering_map(struct ata_ering *ering,
|
|
|
|
int (*map_fn)(struct ata_ering_entry *, void *),
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
int idx, rc = 0;
|
|
|
|
struct ata_ering_entry *ent;
|
|
|
|
|
|
|
|
idx = ering->cursor;
|
|
|
|
do {
|
|
|
|
ent = &ering->ring[idx];
|
|
|
|
if (!ent->err_mask)
|
|
|
|
break;
|
|
|
|
rc = map_fn(ent, arg);
|
|
|
|
if (rc)
|
|
|
|
break;
|
|
|
|
idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
|
|
|
|
} while (idx != ering->cursor);
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2006-06-24 19:30:18 +08:00
|
|
|
static unsigned int ata_eh_dev_action(struct ata_device *dev)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &dev->ap->eh_context;
|
|
|
|
|
|
|
|
return ehc->i.action | ehc->i.dev_action[dev->devno];
|
|
|
|
}
|
|
|
|
|
2006-06-24 19:30:18 +08:00
|
|
|
static void ata_eh_clear_action(struct ata_device *dev,
|
|
|
|
struct ata_eh_info *ehi, unsigned int action)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!dev) {
|
|
|
|
ehi->action &= ~action;
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
ehi->dev_action[i] &= ~action;
|
|
|
|
} else {
|
|
|
|
/* doesn't make sense for port-wide EH actions */
|
|
|
|
WARN_ON(!(action & ATA_EH_PERDEV_MASK));
|
|
|
|
|
|
|
|
/* break ehi->action into ehi->dev_action */
|
|
|
|
if (ehi->action & action) {
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
ehi->dev_action[i] |= ehi->action & action;
|
|
|
|
ehi->action &= ~action;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* turn off the specified per-dev action */
|
|
|
|
ehi->dev_action[dev->devno] &= ~action;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-04-02 17:51:53 +08:00
|
|
|
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new-EH drivers handle timeouts in ->error_handler; just
	 * punt to SCSI EH
	 */
	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	/* old EH: race against normal completion under ap->lock */
	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* keep ata_qc_complete() from finishing this qc */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
|
|
|
|
|
|
|
|
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.  Entry point for both
 *	new EH (ports with ->error_handler) and old EH (->eng_timeout).
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both cmpletions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			/* find the active qc associated with this scmd */
			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
		ap->eh_context.i = ap->eh_info;
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happend after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
		}

		/* this run is complete, make sure EH info is clear */
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old EH: hand the single active qc to ->eng_timeout */
		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
|
|
|
|
|
2006-05-31 17:27:27 +08:00
|
|
|
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress EH remains;
	 * the lock is dropped around schedule() and re-taken to
	 * re-check the flags
	 */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
|
|
|
|
|
2006-04-02 17:51:53 +08:00
|
|
|
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eng_timeout - Handle timeout of queued command
|
|
|
|
* @ap: Port on which timed-out command is active
|
|
|
|
*
|
|
|
|
* Some part of the kernel (currently, only the SCSI layer)
|
|
|
|
* has noticed that the active command on port @ap has not
|
|
|
|
* completed after a specified length of time. Handle this
|
|
|
|
* condition by disabling DMA (if necessary) and completing
|
|
|
|
* transactions, with error if necessary.
|
|
|
|
*
|
|
|
|
* This also handles the case of the "lost interrupt", where
|
|
|
|
* for some reason (possibly hardware bug, possibly driver bug)
|
|
|
|
* an interrupt was not delivered to the driver, even though the
|
|
|
|
* transaction completed successfully.
|
|
|
|
*
|
2006-05-15 19:58:12 +08:00
|
|
|
* TODO: kill this function once old EH is gone.
|
|
|
|
*
|
2006-04-02 17:51:53 +08:00
|
|
|
* LOCKING:
|
|
|
|
* Inherited from SCSI layer (none, can sleep)
|
|
|
|
*/
|
|
|
|
void ata_eng_timeout(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
|
|
|
ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
|
|
|
|
|
|
|
|
DPRINTK("EXIT\n");
|
|
|
|
}
|
|
|
|
|
2006-05-15 19:58:05 +08:00
|
|
|
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	/* flags must be set before aborting the scmd below */
	qc->flags |= ATA_QCFLAG_FAILED;
	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}
|
|
|
|
|
2006-05-15 19:58:07 +08:00
|
|
|
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	ap->pflags |= ATA_PFLAG_EH_PENDING;
	/* kick the SCSI midlayer's EH thread for our host */
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_port_abort - abort all qc's on the port
|
|
|
|
* @ap: ATA port to abort qc's for
|
|
|
|
*
|
|
|
|
* Abort all active qc's of @ap and schedule EH.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
2006-08-24 15:19:22 +08:00
|
|
|
* spin_lock_irqsave(host lock)
|
2006-05-15 19:58:07 +08:00
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* Number of aborted qc's.
|
|
|
|
*/
|
|
|
|
int ata_port_abort(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
int tag, nr_aborted = 0;
|
|
|
|
|
|
|
|
WARN_ON(!ap->ops->error_handler);
|
|
|
|
|
|
|
|
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
|
|
|
|
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
|
|
|
|
|
|
|
|
if (qc) {
|
|
|
|
qc->flags |= ATA_QCFLAG_FAILED;
|
|
|
|
ata_qc_complete(qc);
|
|
|
|
nr_aborted++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nr_aborted)
|
|
|
|
ata_port_schedule_eh(ap);
|
|
|
|
|
|
|
|
return nr_aborted;
|
|
|
|
}
|
|
|
|
|
2006-05-15 19:58:09 +08:00
|
|
|
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* hardware-level freeze is optional */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_port_freeze - abort & freeze port
|
|
|
|
* @ap: ATA port to freeze
|
|
|
|
*
|
|
|
|
* Abort and freeze @ap.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
2006-08-24 15:19:22 +08:00
|
|
|
* spin_lock_irqsave(host lock)
|
2006-05-15 19:58:09 +08:00
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* Number of aborted commands.
|
|
|
|
*/
|
|
|
|
int ata_port_freeze(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
int nr_aborted;
|
|
|
|
|
|
|
|
WARN_ON(!ap->ops->error_handler);
|
|
|
|
|
|
|
|
nr_aborted = ata_port_abort(ap);
|
|
|
|
__ata_port_freeze(ap);
|
|
|
|
|
|
|
|
return nr_aborted;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-EH drivers have no freeze/thaw concept */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
|
|
|
|
|
|
|
|
/**
 *	ata_port_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-EH drivers have no freeze/thaw concept */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	/* hardware-level thaw is optional */
	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
|
|
|
|
|
2006-04-02 17:51:53 +08:00
|
|
|
/* Dummy ->scsidone used while completing a qc from EH: the scmd is
 * finished through the EH done queue instead (see
 * __ata_eh_qc_complete()), so this callback intentionally does
 * nothing.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
|
|
|
|
|
|
|
|
/* Complete @qc from EH context: under ap->lock, divert its scsidone
 * to a no-op and run the low-level completion, then hand the scmd to
 * the SCSI EH done queue for final processing outside the lock.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	/* scmd completion happens via eh_done_q below, not scsidone */
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* restore full retry budget so this completion doesn't count
	 * against the midlayer's retry limit
	 */
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* charge a retry only if this qc itself didn't fail */
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-05-31 17:28:01 +08:00
|
|
|
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev: disable it, take it offline from the SCSI side
 *	and, if that succeeds, schedule SCSI hotplug removal.
 *
 *	LOCKING:
 *	None.  Grabs host lock internally.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	/* detach request is being handled now; clear the request flag */
	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		/* offlined; tell the SCSI hotplug task to remove it */
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @ap->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.  Grabs host lock internally.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
			       unsigned int action)
{
	unsigned long flags;
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_eh_context *ehc = &ap->eh_context;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	/* drop the about-to-be-performed bits from eh_info */
	ata_eh_clear_action(dev, ehi, action);

	/* report recovery unless this EH round was requested quiet */
	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
|
|
|
|
|
2006-06-19 17:27:23 +08:00
|
|
|
/**
 *	ata_eh_done - EH action complete
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @ap->eh_context.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
			unsigned int action)
{
	/* if reset is complete, clear all reset actions & reset modifier */
	if (action & ATA_EH_RESET_MASK) {
		action |= ATA_EH_RESET_MASK;
		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, &ap->eh_context.i, action);
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/**
|
|
|
|
* ata_err_string - convert err_mask to descriptive string
|
|
|
|
* @err_mask: error mask to convert to string
|
|
|
|
*
|
|
|
|
* Convert @err_mask to descriptive string. Errors are
|
|
|
|
* prioritized according to severity and only the most severe
|
|
|
|
* error is reported.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* None.
|
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* Descriptive string for @err_mask
|
|
|
|
*/
|
|
|
|
static const char * ata_err_string(unsigned int err_mask)
|
|
|
|
{
|
|
|
|
if (err_mask & AC_ERR_HOST_BUS)
|
|
|
|
return "host bus error";
|
|
|
|
if (err_mask & AC_ERR_ATA_BUS)
|
|
|
|
return "ATA bus error";
|
|
|
|
if (err_mask & AC_ERR_TIMEOUT)
|
|
|
|
return "timeout";
|
|
|
|
if (err_mask & AC_ERR_HSM)
|
|
|
|
return "HSM violation";
|
|
|
|
if (err_mask & AC_ERR_SYSTEM)
|
|
|
|
return "internal error";
|
|
|
|
if (err_mask & AC_ERR_MEDIA)
|
|
|
|
return "media error";
|
|
|
|
if (err_mask & AC_ERR_INVALID)
|
|
|
|
return "invalid argument";
|
|
|
|
if (err_mask & AC_ERR_DEV)
|
|
|
|
return "device error";
|
|
|
|
return "unknown error";
|
|
|
|
}
|
|
|
|
|
2006-05-15 20:03:46 +08:00
|
|
|
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	/* build READ LOG EXT taskfile: page in LBA low, count split
	 * across nsect/hob_nsect (48-bit command)
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* whole sector must sum to zero; warn but keep going on mismatch */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	/* NQ bit set means the error is not for a queued command */
	if (buf[0] & 0x80)
		return -ENOENT;

	/* bits 4:0 of byte 0 hold the tag of the failed command */
	*tag = buf[0] & 0x1f;

	/* extract the failed command's result taskfile from the
	 * fixed byte layout of log page 10h
	 */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@qc: qc to perform REQUEST_SENSE for; sense data is written to
 *	     qc->scsicmd->sense_buffer (SCSI_SENSE_BUFFERSIZE bytes)
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
	struct ata_port *ap = dev->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = qc->result_tf.feature >> 4;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		/* byte-count limit for PIO transfer */
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@ap: ATA port to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.  Accumulates the deduced err_mask and recovery action
 *	into @ap->eh_context.i.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;

	if (serror & SERR_PERSISTENT) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
	}
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_HARDRESET;
	}
	/* PHY ready change / device exchange indicates hotplug event */
	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
|
|
|
|
|
2006-05-15 20:03:46 +08:00
|
|
|
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@ap: ATA port to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev = ap->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc with err_mask set means the LLDD already
		 * pinpointed the failure; nothing left for us to do
		 */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(ap->sactive & (1 << tag))) {
		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV;
	/* move the device error from port-wide context to the qc */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	avaliable.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than a clean DRDY status is an HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* refine AC_ERR_DEV using the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* can't issue REQUEST SENSE on a frozen port */
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
		/* last case - no break needed */
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
|
2006-05-15 19:58:22 +08:00
|
|
|
{
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
if (err_mask & AC_ERR_ATA_BUS)
|
2006-05-15 19:58:22 +08:00
|
|
|
return 1;
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
if (err_mask & AC_ERR_TIMEOUT)
|
|
|
|
return 2;
|
|
|
|
|
|
|
|
if (is_io) {
|
|
|
|
if (err_mask & AC_ERR_HSM)
|
2006-05-15 19:58:22 +08:00
|
|
|
return 2;
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
if ((err_mask &
|
|
|
|
(AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
|
|
|
|
return 3;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* Accumulator passed to speed_down_verdict_cb() while iterating the
 * device error ring.
 */
struct speed_down_verdict_arg {
	u64 since;		/* ignore entries older than this timestamp */
	int nr_errors[4];	/* per-category counts, indexed by
				 * ata_eh_categorize_error() result (0-3) */
};
|
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* Error-ring iteration callback: count @ent into the per-category
 * totals in @void_arg (a struct speed_down_verdict_arg).  Returns -1
 * to stop iteration once entries become older than arg->since.
 */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);

	/* entries are visited newest-first; stop at the time cutoff */
	if (ent->timestamp < arg->since)
		return -1;

	arg->nr_errors[cat]++;
	return 0;
}
|
|
|
|
|
|
|
|
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	Cat-1 is ATA_BUS error for any command.
 *
 *	Cat-2 is TIMEOUT for any command or HSM violation for known
 *	supported commands.
 *
 *	Cat-3 is unclassified DEV error for known supported
 *	command.
 *
 *	NCQ needs to be turned off if there have been more than 3
 *	Cat-2 + Cat-3 errors during last 10 minutes.
 *
 *	Speed down is necessary if there have been more than 3 Cat-1 +
 *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
 *
 *	Falling back to PIO mode is necessary if there have been more
 *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	/* window lengths in jiffies; u64 so the math can't wrap */
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	/* min() guards against j64 < window early after boot (underflow) */
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	/* nr_errors[] is indexed by error category: [1]=Cat-1 (ATA_BUS),
	 * [2]=Cat-2 (TIMEOUT/HSM), [3]=Cat-3 (unclassified DEV).
	 */
	if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;
	if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	return verdict;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_speed_down - record error and speed down if necessary
|
|
|
|
* @dev: Failed device
|
|
|
|
* @is_io: Did the device fail during normal IO?
|
|
|
|
* @err_mask: err_mask of the error
|
|
|
|
*
|
|
|
|
* Record error and examine error history to determine whether
|
|
|
|
* adjusting transmission speed is necessary. It also sets
|
|
|
|
* transmission limits appropriately if such adjustment is
|
|
|
|
* necessary.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*
|
|
|
|
* RETURNS:
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
* Determined recovery action.
|
2006-05-15 19:58:22 +08:00
|
|
|
*/
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
|
|
|
|
unsigned int err_mask)
|
2006-05-15 19:58:22 +08:00
|
|
|
{
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
unsigned int verdict;
|
|
|
|
unsigned int action = 0;
|
|
|
|
|
|
|
|
/* don't bother if Cat-0 error */
|
|
|
|
if (ata_eh_categorize_error(is_io, err_mask) == 0)
|
2006-05-15 19:58:22 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* record error and determine whether speed down is necessary */
|
|
|
|
ata_ering_record(&dev->ering, is_io, err_mask);
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
verdict = ata_eh_speed_down_verdict(dev);
|
2006-05-15 19:58:22 +08:00
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* turn off NCQ? */
|
|
|
|
if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
|
|
|
|
(dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
|
|
|
|
ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
|
|
|
|
dev->flags |= ATA_DFLAG_NCQ_OFF;
|
|
|
|
ata_dev_printk(dev, KERN_WARNING,
|
|
|
|
"NCQ disabled due to excessive errors\n");
|
|
|
|
goto done;
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* speed down? */
|
|
|
|
if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
|
|
|
|
/* speed down SATA link speed if possible */
|
|
|
|
if (sata_down_spd_limit(dev->ap) == 0) {
|
|
|
|
action |= ATA_EH_HARDRESET;
|
|
|
|
goto done;
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
/* lower transfer mode */
|
|
|
|
if (dev->spdn_cnt < 2) {
|
|
|
|
static const int dma_dnxfer_sel[] =
|
|
|
|
{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
|
|
|
|
static const int pio_dnxfer_sel[] =
|
|
|
|
{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
|
|
|
|
int sel;
|
|
|
|
|
|
|
|
if (dev->xfer_shift != ATA_SHIFT_PIO)
|
|
|
|
sel = dma_dnxfer_sel[dev->spdn_cnt];
|
|
|
|
else
|
|
|
|
sel = pio_dnxfer_sel[dev->spdn_cnt];
|
|
|
|
|
|
|
|
dev->spdn_cnt++;
|
|
|
|
|
|
|
|
if (ata_down_xfermask_limit(dev, sel) == 0) {
|
|
|
|
action |= ATA_EH_SOFTRESET;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fall back to PIO? Slowing down to PIO is meaningless for
|
|
|
|
* SATA. Consider it only for PATA.
|
|
|
|
*/
|
|
|
|
if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
|
|
|
|
(dev->ap->cbl != ATA_CBL_SATA) &&
|
|
|
|
(dev->xfer_shift != ATA_SHIFT_PIO)) {
|
|
|
|
if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
|
|
|
|
dev->spdn_cnt = 0;
|
|
|
|
action |= ATA_EH_SOFTRESET;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
return 0;
|
libata: put some intelligence into EH speed down sequence
The current EH speed down code is more of a proof that the EH
framework is capable of adjusting transfer speed in response to error.
This patch puts some intelligence into EH speed down sequence. The
rules are..
* If there have been more than three timeout, HSM violation or
unclassified DEV errors for known supported commands during last 10
mins, NCQ is turned off.
* If there have been more than three timeout or HSM violation for known
supported command, transfer mode is slowed down. If DMA is active,
it is first slowered by one grade (e.g. UDMA133->100). If that
doesn't help, it's slowered to 40c limit (UDMA33). If PIO is
active, it's slowered by one grade first. If that doesn't help,
PIO0 is forced. Note that this rule does not change transfer mode.
DMA is never degraded into PIO by this rule.
* If there have been more than ten ATA bus, timeout, HSM violation or
unclassified device errors for known supported commands && speeding
down DMA mode didn't help, the device is forced into PIO mode. Note
that this rule is considered only for PATA devices and is pretty
difficult to trigger.
One error can only trigger one rule at a time. After a rule is
triggered, error history is cleared such that the next speed down
happens only after some number of errors are accumulated. This makes
sense because now speed down is done in bigger stride.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:22:31 +08:00
|
|
|
done:
|
|
|
|
/* device has been slowed down, blow error history */
|
|
|
|
ata_ering_clear(&dev->ering);
|
|
|
|
return action;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_autopsy - analyze error and determine recovery action
|
|
|
|
* @ap: ATA port to perform autopsy on
|
|
|
|
*
|
|
|
|
* Analyze why @ap failed and determine which recovery action is
|
|
|
|
* needed. This function also sets more detailed AC_ERR_* values
|
|
|
|
* and fills sense data for ATAPI CHECK SENSE.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*/
|
|
|
|
static void ata_eh_autopsy(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
|
|
|
unsigned int all_err_mask = 0;
|
|
|
|
int tag, is_io = 0;
|
|
|
|
u32 serror;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
|
|
|
|
return;
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/* obtain and analyze SError */
|
|
|
|
rc = sata_scr_read(ap, SCR_ERROR, &serror);
|
|
|
|
if (rc == 0) {
|
|
|
|
ehc->i.serror |= serror;
|
|
|
|
ata_eh_analyze_serror(ap);
|
|
|
|
} else if (rc != -EOPNOTSUPP)
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action |= ATA_EH_HARDRESET;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-05-15 20:03:46 +08:00
|
|
|
/* analyze NCQ failure */
|
|
|
|
ata_eh_analyze_ncq_error(ap);
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/* any real error trumps AC_ERR_OTHER */
|
|
|
|
if (ehc->i.err_mask & ~AC_ERR_OTHER)
|
|
|
|
ehc->i.err_mask &= ~AC_ERR_OTHER;
|
|
|
|
|
|
|
|
all_err_mask |= ehc->i.err_mask;
|
|
|
|
|
|
|
|
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
|
|
|
|
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
|
|
|
|
|
|
|
|
if (!(qc->flags & ATA_QCFLAG_FAILED))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* inherit upper level err_mask */
|
|
|
|
qc->err_mask |= ehc->i.err_mask;
|
|
|
|
|
|
|
|
/* analyze TF */
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
/* DEV errors are probably spurious in case of ATA_BUS error */
|
|
|
|
if (qc->err_mask & AC_ERR_ATA_BUS)
|
|
|
|
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
|
|
|
|
AC_ERR_INVALID);
|
|
|
|
|
|
|
|
/* any real error trumps unknown error */
|
|
|
|
if (qc->err_mask & ~AC_ERR_OTHER)
|
|
|
|
qc->err_mask &= ~AC_ERR_OTHER;
|
|
|
|
|
|
|
|
/* SENSE_VALID trumps dev/unknown error and revalidation */
|
|
|
|
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
|
|
|
|
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action &= ~ATA_EH_REVALIDATE;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* accumulate error info */
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.dev = qc->dev;
|
2006-05-15 19:58:22 +08:00
|
|
|
all_err_mask |= qc->err_mask;
|
|
|
|
if (qc->flags & ATA_QCFLAG_IO)
|
|
|
|
is_io = 1;
|
|
|
|
}
|
|
|
|
|
2006-05-16 11:58:24 +08:00
|
|
|
/* enforce default EH actions */
|
2006-06-29 00:29:30 +08:00
|
|
|
if (ap->pflags & ATA_PFLAG_FROZEN ||
|
2006-05-16 11:58:24 +08:00
|
|
|
all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action |= ATA_EH_SOFTRESET;
|
2006-05-16 11:58:24 +08:00
|
|
|
else if (all_err_mask)
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action |= ATA_EH_REVALIDATE;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-06-19 17:27:23 +08:00
|
|
|
/* if we have offending qcs and the associated failed device */
|
2006-07-08 19:17:26 +08:00
|
|
|
if (ehc->i.dev) {
|
2006-06-19 17:27:23 +08:00
|
|
|
/* speed down */
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
|
|
|
|
all_err_mask);
|
2006-06-19 17:27:23 +08:00
|
|
|
|
|
|
|
/* perform per-dev EH action only on the offending device */
|
2006-07-08 19:17:26 +08:00
|
|
|
ehc->i.dev_action[ehc->i.dev->devno] |=
|
|
|
|
ehc->i.action & ATA_EH_PERDEV_MASK;
|
|
|
|
ehc->i.action &= ~ATA_EH_PERDEV_MASK;
|
2006-06-19 17:27:23 +08:00
|
|
|
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
DPRINTK("EXIT\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_report - report error handling to user
|
|
|
|
* @ap: ATA port EH is going on
|
|
|
|
*
|
|
|
|
* Report EH to user.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* None.
|
|
|
|
*/
|
|
|
|
static void ata_eh_report(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
|
|
|
const char *frozen, *desc;
|
|
|
|
int tag, nr_failed = 0;
|
|
|
|
|
|
|
|
desc = NULL;
|
|
|
|
if (ehc->i.desc[0] != '\0')
|
|
|
|
desc = ehc->i.desc;
|
|
|
|
|
|
|
|
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
|
|
|
|
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
|
|
|
|
|
|
|
|
if (!(qc->flags & ATA_QCFLAG_FAILED))
|
|
|
|
continue;
|
|
|
|
if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
nr_failed++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nr_failed && !ehc->i.err_mask)
|
|
|
|
return;
|
|
|
|
|
|
|
|
frozen = "";
|
2006-06-29 00:29:30 +08:00
|
|
|
if (ap->pflags & ATA_PFLAG_FROZEN)
|
2006-05-15 19:58:22 +08:00
|
|
|
frozen = " frozen";
|
|
|
|
|
|
|
|
if (ehc->i.dev) {
|
2006-05-15 20:03:46 +08:00
|
|
|
ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
|
|
|
|
"SAct 0x%x SErr 0x%x action 0x%x%s\n",
|
|
|
|
ehc->i.err_mask, ap->sactive, ehc->i.serror,
|
|
|
|
ehc->i.action, frozen);
|
2006-05-15 19:58:22 +08:00
|
|
|
if (desc)
|
|
|
|
ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
|
|
|
|
} else {
|
2006-05-15 20:03:46 +08:00
|
|
|
ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
|
|
|
|
"SAct 0x%x SErr 0x%x action 0x%x%s\n",
|
|
|
|
ehc->i.err_mask, ap->sactive, ehc->i.serror,
|
|
|
|
ehc->i.action, frozen);
|
2006-05-15 19:58:22 +08:00
|
|
|
if (desc)
|
|
|
|
ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
|
2006-11-14 21:36:12 +08:00
|
|
|
static const char *dma_str[] = {
|
|
|
|
[DMA_BIDIRECTIONAL] = "bidi",
|
|
|
|
[DMA_TO_DEVICE] = "out",
|
|
|
|
[DMA_FROM_DEVICE] = "in",
|
|
|
|
[DMA_NONE] = "",
|
|
|
|
};
|
2006-05-15 19:58:22 +08:00
|
|
|
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
|
2006-11-14 21:36:12 +08:00
|
|
|
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
|
|
|
|
continue;
|
|
|
|
|
2006-11-14 21:36:12 +08:00
|
|
|
ata_dev_printk(qc->dev, KERN_ERR,
|
|
|
|
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
|
2006-11-20 15:05:34 +08:00
|
|
|
"tag %d cdb 0x%x data %u %s\n "
|
2006-11-14 21:36:12 +08:00
|
|
|
"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
|
|
|
|
"Emask 0x%x (%s)\n",
|
|
|
|
cmd->command, cmd->feature, cmd->nsect,
|
|
|
|
cmd->lbal, cmd->lbam, cmd->lbah,
|
|
|
|
cmd->hob_feature, cmd->hob_nsect,
|
|
|
|
cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
|
2007-01-03 16:30:39 +08:00
|
|
|
cmd->device, qc->tag, qc->cdb[0], qc->nbytes,
|
2006-11-20 15:05:34 +08:00
|
|
|
dma_str[qc->dma_dir],
|
2006-11-14 21:36:12 +08:00
|
|
|
res->command, res->feature, res->nsect,
|
|
|
|
res->lbal, res->lbam, res->lbah,
|
|
|
|
res->hob_feature, res->hob_nsect,
|
|
|
|
res->hob_lbal, res->hob_lbam, res->hob_lbah,
|
|
|
|
res->device, qc->err_mask, ata_err_string(qc->err_mask));
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-31 17:28:24 +08:00
|
|
|
static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
|
libata: add deadline support to prereset and reset methods
Add @deadline to prereset and reset methods and make them honor it.
ata_wait_ready() which directly takes @deadline is implemented to be
used as the wait function. This patch is in preparation for EH timing
improvements.
* ata_wait_ready() never does busy sleep. It's only used from EH and
no wait in EH is that urgent. This function also prints 'be
patient' message automatically after 5 secs of waiting if more than
3 secs is remaining till deadline.
* ata_bus_post_reset() now fails with error code if any of its wait
fails. This is important because earlier reset tries will have
shorter timeout than the spec requires. If a device fails to
respond before the short timeout, reset should be retried with
longer timeout rather than silently ignoring the device.
There are three behavior differences.
1. Timeout is applied to both devices at once, not separately. This
is more consistent with what the spec says.
2. When a device passes devchk but fails to become ready before
deadline. Previouly, post_reset would just succeed and let
device classification remove the device. New code fails the
reset thus causing reset retry. After a few times, EH will give
up disabling the port.
3. When slave device passes devchk but fails to become accessible
(TF-wise) after reset. Original code disables dev1 after 30s
timeout and continues as if the device doesn't exist, while the
patched code fails reset. When this happens, new code fails
reset on whole port rather than proceeding with only the primary
device.
If the failing device is suffering transient problems, new code
retries reset which is a better behavior. If the failing device is
actually broken, the net effect is identical to it, but not to the
other device sharing the channel. In the previous code, reset would
have succeeded after 30s thus detecting the working one. In the new
code, reset fails and whole port gets disabled. IMO, it's a
pathological case anyway (broken device sharing bus with working
one) and doesn't really matter.
* ata_bus_softreset() is changed to return error code from
ata_bus_post_reset(). It used to return 0 unconditionally.
* Spin up waiting is to be removed and not converted to honor
deadline.
* To be on the safe side, deadline is set to 40s for the time being.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:50:52 +08:00
|
|
|
unsigned int *classes, unsigned long deadline)
|
2006-05-31 17:28:24 +08:00
|
|
|
{
|
|
|
|
int i, rc;
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
classes[i] = ATA_DEV_UNKNOWN;
|
|
|
|
|
libata: add deadline support to prereset and reset methods
Add @deadline to prereset and reset methods and make them honor it.
ata_wait_ready() which directly takes @deadline is implemented to be
used as the wait function. This patch is in preparation for EH timing
improvements.
* ata_wait_ready() never does busy sleep. It's only used from EH and
no wait in EH is that urgent. This function also prints 'be
patient' message automatically after 5 secs of waiting if more than
3 secs is remaining till deadline.
* ata_bus_post_reset() now fails with error code if any of its wait
fails. This is important because earlier reset tries will have
shorter timeout than the spec requires. If a device fails to
respond before the short timeout, reset should be retried with
longer timeout rather than silently ignoring the device.
There are three behavior differences.
1. Timeout is applied to both devices at once, not separately. This
is more consistent with what the spec says.
2. When a device passes devchk but fails to become ready before
deadline. Previouly, post_reset would just succeed and let
device classification remove the device. New code fails the
reset thus causing reset retry. After a few times, EH will give
up disabling the port.
3. When slave device passes devchk but fails to become accessible
(TF-wise) after reset. Original code disables dev1 after 30s
timeout and continues as if the device doesn't exist, while the
patched code fails reset. When this happens, new code fails
reset on whole port rather than proceeding with only the primary
device.
If the failing device is suffering transient problems, new code
retries reset which is a better behavior. If the failing device is
actually broken, the net effect is identical to it, but not to the
other device sharing the channel. In the previous code, reset would
have succeeded after 30s thus detecting the working one. In the new
code, reset fails and whole port gets disabled. IMO, it's a
pathological case anyway (broken device sharing bus with working
one) and doesn't really matter.
* ata_bus_softreset() is changed to return error code from
ata_bus_post_reset(). It used to return 0 unconditionally.
* Spin up waiting is to be removed and not converted to honor
deadline.
* To be on the safe side, deadline is set to 40s for the time being.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-02-02 15:50:52 +08:00
|
|
|
rc = reset(ap, classes, deadline);
|
2006-05-31 17:28:24 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
/* If any class isn't ATA_DEV_UNKNOWN, consider classification
|
|
|
|
* is complete and convert all ATA_DEV_UNKNOWN to
|
|
|
|
* ATA_DEV_NONE.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
if (classes[i] != ATA_DEV_UNKNOWN)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (i < ATA_MAX_DEVICES)
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
if (classes[i] == ATA_DEV_UNKNOWN)
|
|
|
|
classes[i] = ATA_DEV_NONE;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-05-31 17:27:50 +08:00
|
|
|
static int ata_eh_followup_srst_needed(int rc, int classify,
|
|
|
|
const unsigned int *classes)
|
|
|
|
{
|
|
|
|
if (rc == -EAGAIN)
|
|
|
|
return 1;
|
|
|
|
if (rc != 0)
|
|
|
|
return 0;
|
|
|
|
if (classify && classes[0] == ATA_DEV_UNKNOWN)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ata_eh_reset(struct ata_port *ap, int classify,
|
2006-05-31 17:27:48 +08:00
|
|
|
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
|
2006-05-15 19:58:22 +08:00
|
|
|
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
2006-05-31 17:27:50 +08:00
|
|
|
unsigned int *classes = ehc->classes;
|
2006-07-03 15:07:26 +08:00
|
|
|
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
|
2007-02-02 15:50:52 +08:00
|
|
|
int try = 0;
|
|
|
|
unsigned long deadline;
|
2006-05-31 17:27:48 +08:00
|
|
|
unsigned int action;
|
2006-05-15 19:58:22 +08:00
|
|
|
ata_reset_fn_t reset;
|
2006-05-31 17:27:50 +08:00
|
|
|
int i, did_followup_srst, rc;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-07-10 22:18:46 +08:00
|
|
|
/* about to reset */
|
|
|
|
ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
|
|
|
|
|
2006-05-31 17:27:48 +08:00
|
|
|
/* Determine which reset to use and record in ehc->i.action.
|
|
|
|
* prereset() may examine and modify it.
|
|
|
|
*/
|
|
|
|
action = ehc->i.action;
|
|
|
|
ehc->i.action &= ~ATA_EH_RESET_MASK;
|
2006-05-15 19:58:22 +08:00
|
|
|
if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
|
2006-05-31 17:27:48 +08:00
|
|
|
!(action & ATA_EH_HARDRESET))))
|
|
|
|
ehc->i.action |= ATA_EH_SOFTRESET;
|
2006-05-15 19:58:22 +08:00
|
|
|
else
|
2006-05-31 17:27:48 +08:00
|
|
|
ehc->i.action |= ATA_EH_HARDRESET;
|
|
|
|
|
|
|
|
if (prereset) {
|
2007-02-02 15:50:52 +08:00
|
|
|
rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT);
|
2006-05-31 17:27:48 +08:00
|
|
|
if (rc) {
|
2006-09-27 00:53:38 +08:00
|
|
|
if (rc == -ENOENT) {
|
2007-03-12 16:24:08 +08:00
|
|
|
ata_port_printk(ap, KERN_DEBUG,
|
|
|
|
"port disabled. ignoring.\n");
|
2006-09-27 00:53:38 +08:00
|
|
|
ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
|
2007-03-12 16:24:08 +08:00
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
classes[i] = ATA_DEV_NONE;
|
|
|
|
|
|
|
|
rc = 0;
|
2006-09-27 00:53:38 +08:00
|
|
|
} else
|
|
|
|
ata_port_printk(ap, KERN_ERR,
|
2006-05-31 17:27:48 +08:00
|
|
|
"prereset failed (errno=%d)\n", rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* prereset() might have modified ehc->i.action */
|
|
|
|
if (ehc->i.action & ATA_EH_HARDRESET)
|
2006-05-15 19:58:22 +08:00
|
|
|
reset = hardreset;
|
2006-05-31 17:27:48 +08:00
|
|
|
else if (ehc->i.action & ATA_EH_SOFTRESET)
|
|
|
|
reset = softreset;
|
|
|
|
else {
|
|
|
|
/* prereset told us not to reset, bang classes and return */
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
classes[i] = ATA_DEV_NONE;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* did prereset() screw up? if so, fix up to avoid oopsing */
|
|
|
|
if (!reset) {
|
|
|
|
ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
|
|
|
|
"invalid reset type\n");
|
|
|
|
if (softreset)
|
|
|
|
reset = softreset;
|
|
|
|
else
|
|
|
|
reset = hardreset;
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
retry:
|
2007-02-02 15:50:52 +08:00
|
|
|
deadline = jiffies + ata_eh_reset_timeouts[try++];
|
|
|
|
|
2006-05-31 17:28:11 +08:00
|
|
|
/* shut up during boot probing */
|
|
|
|
if (verbose)
|
|
|
|
ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
|
|
|
|
reset == softreset ? "soft" : "hard");
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-07-10 22:18:46 +08:00
|
|
|
/* mark that this EH session started with reset */
|
2007-04-23 01:41:05 +08:00
|
|
|
if (reset == hardreset)
|
|
|
|
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
|
|
|
|
else
|
|
|
|
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
rc = ata_do_reset(ap, reset, classes, deadline);
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-05-31 17:27:50 +08:00
|
|
|
did_followup_srst = 0;
|
|
|
|
if (reset == hardreset &&
|
|
|
|
ata_eh_followup_srst_needed(rc, classify, classes)) {
|
|
|
|
/* okay, let's do follow-up softreset */
|
|
|
|
did_followup_srst = 1;
|
|
|
|
reset = softreset;
|
|
|
|
|
|
|
|
if (!reset) {
|
|
|
|
ata_port_printk(ap, KERN_ERR,
|
|
|
|
"follow-up softreset required "
|
|
|
|
"but no softreset avaliable\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2006-06-19 17:27:23 +08:00
|
|
|
ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
|
2007-02-02 15:50:52 +08:00
|
|
|
rc = ata_do_reset(ap, reset, classes, deadline);
|
2006-05-31 17:27:50 +08:00
|
|
|
|
|
|
|
if (rc == 0 && classify &&
|
|
|
|
classes[0] == ATA_DEV_UNKNOWN) {
|
|
|
|
ata_port_printk(ap, KERN_ERR,
|
|
|
|
"classification failed\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) {
|
|
|
|
unsigned long now = jiffies;
|
2006-05-31 17:27:50 +08:00
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
if (time_before(now, deadline)) {
|
|
|
|
unsigned long delta = deadline - jiffies;
|
2006-05-31 17:27:50 +08:00
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
ata_port_printk(ap, KERN_WARNING, "reset failed "
|
|
|
|
"(errno=%d), retrying in %u secs\n",
|
|
|
|
rc, (jiffies_to_msecs(delta) + 999) / 1000);
|
|
|
|
|
|
|
|
schedule_timeout_uninterruptible(delta);
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2007-02-02 15:50:52 +08:00
|
|
|
if (reset == hardreset &&
|
|
|
|
try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
|
2006-05-15 19:58:22 +08:00
|
|
|
sata_down_spd_limit(ap);
|
|
|
|
if (hardreset)
|
|
|
|
reset = hardreset;
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rc == 0) {
|
2006-05-31 17:27:23 +08:00
|
|
|
/* After the reset, the device state is PIO 0 and the
|
|
|
|
* controller state is undefined. Record the mode.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
ap->device[i].pio_mode = XFER_PIO_0;
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
if (postreset)
|
|
|
|
postreset(ap, classes);
|
|
|
|
|
|
|
|
/* reset successful, schedule revalidation */
|
2006-07-10 22:18:46 +08:00
|
|
|
ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
|
2006-05-15 19:58:22 +08:00
|
|
|
ehc->i.action |= ATA_EH_REVALIDATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
static int ata_eh_revalidate_and_attach(struct ata_port *ap,
|
|
|
|
struct ata_device **r_failed_dev)
|
2006-05-15 19:58:22 +08:00
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
|
|
|
struct ata_device *dev;
|
2007-03-22 21:24:19 +08:00
|
|
|
unsigned int new_mask = 0;
|
2006-05-31 17:28:03 +08:00
|
|
|
unsigned long flags;
|
2006-05-15 19:58:22 +08:00
|
|
|
int i, rc = 0;
|
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
2007-03-22 21:24:19 +08:00
|
|
|
/* For PATA drive side cable detection to work, IDENTIFY must
|
|
|
|
* be done backwards such that PDIAG- is released by the slave
|
|
|
|
* device before the master device is identified.
|
|
|
|
*/
|
|
|
|
for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
|
2006-11-10 17:08:10 +08:00
|
|
|
unsigned int action, readid_flags = 0;
|
2006-06-19 17:27:23 +08:00
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
dev = &ap->device[i];
|
2006-06-24 19:30:18 +08:00
|
|
|
action = ata_eh_dev_action(dev);
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-11-10 17:08:10 +08:00
|
|
|
if (ehc->i.flags & ATA_EHI_DID_RESET)
|
|
|
|
readid_flags |= ATA_READID_POSTRESET;
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
|
2006-05-15 19:58:22 +08:00
|
|
|
if (ata_port_offline(ap)) {
|
|
|
|
rc = -EIO;
|
2007-03-22 21:24:19 +08:00
|
|
|
goto err;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
2006-06-19 17:27:23 +08:00
|
|
|
ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
|
2006-11-10 17:08:10 +08:00
|
|
|
rc = ata_dev_revalidate(dev, readid_flags);
|
2006-05-15 19:58:22 +08:00
|
|
|
if (rc)
|
2007-03-22 21:24:19 +08:00
|
|
|
goto err;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2006-06-19 17:27:23 +08:00
|
|
|
ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
|
|
|
|
|
2006-11-01 17:39:27 +08:00
|
|
|
/* Configuration may have changed, reconfigure
|
|
|
|
* transfer mode.
|
|
|
|
*/
|
|
|
|
ehc->i.flags |= ATA_EHI_SETMODE;
|
|
|
|
|
2006-06-12 12:01:34 +08:00
|
|
|
/* schedule the scsi_rescan_device() here */
|
|
|
|
queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
|
2006-05-31 17:28:03 +08:00
|
|
|
} else if (dev->class == ATA_DEV_UNKNOWN &&
|
|
|
|
ehc->tries[dev->devno] &&
|
|
|
|
ata_class_enabled(ehc->classes[dev->devno])) {
|
|
|
|
dev->class = ehc->classes[dev->devno];
|
|
|
|
|
2006-11-10 17:08:10 +08:00
|
|
|
rc = ata_dev_read_id(dev, &dev->class, readid_flags,
|
|
|
|
dev->id);
|
2007-03-22 21:24:19 +08:00
|
|
|
switch (rc) {
|
|
|
|
case 0:
|
|
|
|
new_mask |= 1 << i;
|
|
|
|
break;
|
|
|
|
case -ENOENT:
|
2006-11-10 17:08:10 +08:00
|
|
|
/* IDENTIFY was issued to non-existent
|
|
|
|
* device. No need to reset. Just
|
|
|
|
* thaw and kill the device.
|
|
|
|
*/
|
|
|
|
ata_eh_thaw_port(ap);
|
2006-05-31 17:28:03 +08:00
|
|
|
dev->class = ATA_DEV_UNKNOWN;
|
|
|
|
break;
|
2007-03-22 21:24:19 +08:00
|
|
|
default:
|
|
|
|
dev->class = ATA_DEV_UNKNOWN;
|
|
|
|
goto err;
|
2006-05-31 17:28:03 +08:00
|
|
|
}
|
2007-03-22 21:24:19 +08:00
|
|
|
}
|
|
|
|
}
|
2006-05-31 17:28:03 +08:00
|
|
|
|
2007-04-23 01:05:53 +08:00
|
|
|
/* PDIAG- should have been released, ask cable type if post-reset */
|
|
|
|
if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect)
|
|
|
|
ap->cbl = ap->ops->cable_detect(ap);
|
|
|
|
|
2007-03-22 21:24:19 +08:00
|
|
|
/* Configure new devices forward such that user doesn't see
|
|
|
|
* device detection messages backwards.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
dev = &ap->device[i];
|
2006-11-01 17:39:27 +08:00
|
|
|
|
2007-03-22 21:24:19 +08:00
|
|
|
if (!(new_mask & (1 << i)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ehc->i.flags |= ATA_EHI_PRINTINFO;
|
|
|
|
rc = ata_dev_configure(dev);
|
|
|
|
ehc->i.flags &= ~ATA_EHI_PRINTINFO;
|
|
|
|
if (rc)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
|
|
|
|
/* new device discovered, configure xfermode */
|
|
|
|
ehc->i.flags |= ATA_EHI_SETMODE;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
2007-03-22 21:24:19 +08:00
|
|
|
return 0;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
2007-03-22 21:24:19 +08:00
|
|
|
err:
|
|
|
|
*r_failed_dev = dev;
|
|
|
|
DPRINTK("EXIT rc=%d\n", rc);
|
2006-05-15 19:58:22 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2007-03-02 16:32:47 +08:00
|
|
|
#ifdef CONFIG_PM
|
2006-07-03 15:07:26 +08:00
|
|
|
/**
|
|
|
|
* ata_eh_suspend - handle suspend EH action
|
|
|
|
* @ap: target host port
|
|
|
|
* @r_failed_dev: result parameter to indicate failing device
|
|
|
|
*
|
|
|
|
* Handle suspend EH action. Disk devices are spinned down and
|
|
|
|
* other types of devices are just marked suspended. Once
|
|
|
|
* suspended, no EH action to the device is allowed until it is
|
|
|
|
* resumed.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success, -errno otherwise
|
|
|
|
*/
|
|
|
|
static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
|
|
|
|
{
|
|
|
|
struct ata_device *dev;
|
|
|
|
int i, rc = 0;
|
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned int action, err_mask;
|
|
|
|
|
|
|
|
dev = &ap->device[i];
|
|
|
|
action = ata_eh_dev_action(dev);
|
|
|
|
|
|
|
|
if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
|
|
|
|
|
|
|
|
ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
|
|
|
|
|
|
|
|
if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
|
|
|
|
/* flush cache */
|
|
|
|
rc = ata_flush_cache(dev);
|
|
|
|
if (rc)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* spin down */
|
|
|
|
err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
|
|
|
|
if (err_mask) {
|
|
|
|
ata_dev_printk(dev, KERN_ERR, "failed to "
|
|
|
|
"spin down (err_mask=0x%x)\n",
|
|
|
|
err_mask);
|
|
|
|
rc = -EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
dev->flags |= ATA_DFLAG_SUSPENDED;
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
|
|
|
|
ata_eh_done(ap, dev, ATA_EH_SUSPEND);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rc)
|
|
|
|
*r_failed_dev = dev;
|
|
|
|
|
|
|
|
DPRINTK("EXIT\n");
|
2007-01-26 19:10:25 +08:00
|
|
|
return rc;
|
2006-07-03 15:07:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_prep_resume - prep for resume EH action
|
|
|
|
* @ap: target host port
|
|
|
|
*
|
|
|
|
* Clear SUSPENDED in preparation for scheduled resume actions.
|
|
|
|
* This allows other parts of EH to access the devices being
|
|
|
|
* resumed.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*/
|
|
|
|
static void ata_eh_prep_resume(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
struct ata_device *dev;
|
|
|
|
unsigned long flags;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
unsigned int action;
|
|
|
|
|
|
|
|
dev = &ap->device[i];
|
|
|
|
action = ata_eh_dev_action(dev);
|
|
|
|
|
|
|
|
if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
dev->flags &= ~ATA_DFLAG_SUSPENDED;
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
DPRINTK("EXIT\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_resume - handle resume EH action
|
|
|
|
* @ap: target host port
|
|
|
|
* @r_failed_dev: result parameter to indicate failing device
|
|
|
|
*
|
|
|
|
* Handle resume EH action. Target devices are already reset and
|
|
|
|
* revalidated. Spinning up is the only operation left.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success, -errno otherwise
|
|
|
|
*/
|
|
|
|
static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
|
|
|
|
{
|
|
|
|
struct ata_device *dev;
|
|
|
|
int i, rc = 0;
|
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
unsigned int action, err_mask;
|
|
|
|
|
|
|
|
dev = &ap->device[i];
|
|
|
|
action = ata_eh_dev_action(dev);
|
|
|
|
|
|
|
|
if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
|
|
|
|
|
|
|
|
if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
|
|
|
|
err_mask = ata_do_simple_cmd(dev,
|
|
|
|
ATA_CMD_IDLEIMMEDIATE);
|
|
|
|
if (err_mask) {
|
|
|
|
ata_dev_printk(dev, KERN_ERR, "failed to "
|
|
|
|
"spin up (err_mask=0x%x)\n",
|
|
|
|
err_mask);
|
|
|
|
rc = -EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ata_eh_done(ap, dev, ATA_EH_RESUME);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rc)
|
|
|
|
*r_failed_dev = dev;
|
|
|
|
|
|
|
|
DPRINTK("EXIT\n");
|
|
|
|
return 0;
|
|
|
|
}
|
2007-03-02 16:32:47 +08:00
|
|
|
#endif /* CONFIG_PM */
|
2006-07-03 15:07:26 +08:00
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
static int ata_port_nr_enabled(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
int i, cnt = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
if (ata_dev_enabled(&ap->device[i]))
|
|
|
|
cnt++;
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
static int ata_port_nr_vacant(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
int i, cnt = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
if (ap->device[i].class == ATA_DEV_UNKNOWN)
|
|
|
|
cnt++;
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ata_eh_skip_recovery(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
|
|
|
int i;
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
/* skip if all possible devices are suspended */
|
|
|
|
for (i = 0; i < ata_port_max_devices(ap); i++) {
|
|
|
|
struct ata_device *dev = &ap->device[i];
|
|
|
|
|
2006-07-10 22:18:23 +08:00
|
|
|
if (!(dev->flags & ATA_DFLAG_SUSPENDED))
|
2006-07-03 15:07:26 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == ata_port_max_devices(ap))
|
|
|
|
return 1;
|
|
|
|
|
2006-07-10 22:18:23 +08:00
|
|
|
/* thaw frozen port, resume link and recover failed devices */
|
|
|
|
if ((ap->pflags & ATA_PFLAG_FROZEN) ||
|
|
|
|
(ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
|
2006-05-31 17:28:03 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* skip if class codes for all vacant slots are ATA_DEV_NONE */
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
struct ata_device *dev = &ap->device[i];
|
|
|
|
|
|
|
|
if (dev->class == ATA_DEV_UNKNOWN &&
|
|
|
|
ehc->classes[dev->devno] != ATA_DEV_NONE)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/**
|
|
|
|
* ata_eh_recover - recover host port after error
|
|
|
|
* @ap: host port to recover
|
2006-05-31 17:27:48 +08:00
|
|
|
* @prereset: prereset method (can be NULL)
|
2006-05-15 19:58:22 +08:00
|
|
|
* @softreset: softreset method (can be NULL)
|
|
|
|
* @hardreset: hardreset method (can be NULL)
|
|
|
|
* @postreset: postreset method (can be NULL)
|
|
|
|
*
|
|
|
|
* This is the alpha and omega, eum and yang, heart and soul of
|
|
|
|
* libata exception handling. On entry, actions required to
|
2006-05-31 17:28:03 +08:00
|
|
|
* recover the port and hotplug requests are recorded in
|
|
|
|
* eh_context. This function executes all the operations with
|
|
|
|
* appropriate retrials and fallbacks to resurrect failed
|
|
|
|
* devices, detach goners and greet newcomers.
|
2006-05-15 19:58:22 +08:00
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*
|
|
|
|
* RETURNS:
|
|
|
|
* 0 on success, -errno on failure.
|
|
|
|
*/
|
2006-05-31 17:27:48 +08:00
|
|
|
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
|
|
|
|
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
|
2006-05-15 19:58:22 +08:00
|
|
|
ata_postreset_fn_t postreset)
|
|
|
|
{
|
|
|
|
struct ata_eh_context *ehc = &ap->eh_context;
|
|
|
|
struct ata_device *dev;
|
2007-02-02 15:22:30 +08:00
|
|
|
int i, rc;
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
DPRINTK("ENTER\n");
|
|
|
|
|
|
|
|
/* prep for recovery */
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
dev = &ap->device[i];
|
|
|
|
|
|
|
|
ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
|
2006-05-31 17:28:03 +08:00
|
|
|
|
2007-01-18 16:22:18 +08:00
|
|
|
/* collect port action mask recorded in dev actions */
|
|
|
|
ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK;
|
|
|
|
ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK;
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
/* process hotplug request */
|
|
|
|
if (dev->flags & ATA_DFLAG_DETACH)
|
|
|
|
ata_eh_detach_dev(dev);
|
|
|
|
|
|
|
|
if (!ata_dev_enabled(dev) &&
|
|
|
|
((ehc->i.probe_mask & (1 << dev->devno)) &&
|
|
|
|
!(ehc->did_probe_mask & (1 << dev->devno)))) {
|
|
|
|
ata_eh_detach_dev(dev);
|
|
|
|
ata_dev_init(dev);
|
|
|
|
ehc->did_probe_mask |= (1 << dev->devno);
|
|
|
|
ehc->i.action |= ATA_EH_SOFTRESET;
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
retry:
|
|
|
|
rc = 0;
|
|
|
|
|
2006-06-12 13:11:43 +08:00
|
|
|
/* if UNLOADING, finish immediately */
|
2006-06-29 00:29:30 +08:00
|
|
|
if (ap->pflags & ATA_PFLAG_UNLOADING)
|
2006-06-12 13:11:43 +08:00
|
|
|
goto out;
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
/* prep for resume */
|
|
|
|
ata_eh_prep_resume(ap);
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/* skip EH if possible. */
|
2006-05-31 17:28:03 +08:00
|
|
|
if (ata_eh_skip_recovery(ap))
|
2006-05-15 19:58:22 +08:00
|
|
|
ehc->i.action = 0;
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
ehc->classes[i] = ATA_DEV_UNKNOWN;
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
/* reset */
|
|
|
|
if (ehc->i.action & ATA_EH_RESET_MASK) {
|
|
|
|
ata_eh_freeze_port(ap);
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
|
|
|
|
softreset, hardreset, postreset);
|
2006-05-15 19:58:22 +08:00
|
|
|
if (rc) {
|
|
|
|
ata_port_printk(ap, KERN_ERR,
|
|
|
|
"reset failed, giving up\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ata_eh_thaw_port(ap);
|
|
|
|
}
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
/* revalidate existing devices and attach new ones */
|
|
|
|
rc = ata_eh_revalidate_and_attach(ap, &dev);
|
2006-05-15 19:58:22 +08:00
|
|
|
if (rc)
|
|
|
|
goto dev_fail;
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
/* resume devices */
|
|
|
|
rc = ata_eh_resume(ap, &dev);
|
|
|
|
if (rc)
|
|
|
|
goto dev_fail;
|
|
|
|
|
2006-11-01 17:39:27 +08:00
|
|
|
/* configure transfer mode if necessary */
|
|
|
|
if (ehc->i.flags & ATA_EHI_SETMODE) {
|
2006-05-15 19:58:22 +08:00
|
|
|
rc = ata_set_mode(ap, &dev);
|
2007-02-02 15:22:30 +08:00
|
|
|
if (rc)
|
2006-05-15 19:58:22 +08:00
|
|
|
goto dev_fail;
|
2006-11-01 17:39:27 +08:00
|
|
|
ehc->i.flags &= ~ATA_EHI_SETMODE;
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
2006-07-03 15:07:26 +08:00
|
|
|
/* suspend devices */
|
|
|
|
rc = ata_eh_suspend(ap, &dev);
|
|
|
|
if (rc)
|
|
|
|
goto dev_fail;
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
dev_fail:
|
2007-02-02 15:22:30 +08:00
|
|
|
ehc->tries[dev->devno]--;
|
|
|
|
|
2006-05-15 19:58:22 +08:00
|
|
|
switch (rc) {
|
|
|
|
case -EINVAL:
|
2007-02-02 15:22:30 +08:00
|
|
|
/* eeek, something went very wrong, give up */
|
2006-05-15 19:58:22 +08:00
|
|
|
ehc->tries[dev->devno] = 0;
|
|
|
|
break;
|
2007-02-02 15:22:30 +08:00
|
|
|
|
|
|
|
case -ENODEV:
|
|
|
|
/* device missing or wrong IDENTIFY data, schedule probing */
|
|
|
|
ehc->i.probe_mask |= (1 << dev->devno);
|
|
|
|
/* give it just one more chance */
|
|
|
|
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
|
2006-05-15 19:58:22 +08:00
|
|
|
case -EIO:
|
2007-02-02 15:22:30 +08:00
|
|
|
if (ehc->tries[dev->devno] == 1) {
|
|
|
|
/* This is the last chance, better to slow
|
|
|
|
* down than lose it.
|
|
|
|
*/
|
|
|
|
sata_down_spd_limit(ap);
|
|
|
|
ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
}
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
|
|
|
|
/* disable device if it has used up all its chances */
|
2006-05-15 19:58:22 +08:00
|
|
|
ata_dev_disable(dev);
|
|
|
|
|
2006-05-31 17:28:03 +08:00
|
|
|
/* detach if offline */
|
|
|
|
if (ata_port_offline(ap))
|
|
|
|
ata_eh_detach_dev(dev);
|
|
|
|
|
|
|
|
/* probe if requested */
|
|
|
|
if ((ehc->i.probe_mask & (1 << dev->devno)) &&
|
|
|
|
!(ehc->did_probe_mask & (1 << dev->devno))) {
|
|
|
|
ata_eh_detach_dev(dev);
|
|
|
|
ata_dev_init(dev);
|
|
|
|
|
|
|
|
ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
|
|
|
|
ehc->did_probe_mask |= (1 << dev->devno);
|
|
|
|
ehc->i.action |= ATA_EH_SOFTRESET;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* soft didn't work? be haaaaard */
|
|
|
|
if (ehc->i.flags & ATA_EHI_DID_RESET)
|
|
|
|
ehc->i.action |= ATA_EH_HARDRESET;
|
|
|
|
else
|
|
|
|
ehc->i.action |= ATA_EH_SOFTRESET;
|
|
|
|
}
|
2006-05-15 19:58:22 +08:00
|
|
|
|
|
|
|
if (ata_port_nr_enabled(ap)) {
|
|
|
|
ata_port_printk(ap, KERN_WARNING, "failed to recover some "
|
|
|
|
"devices, retrying in 5 secs\n");
|
|
|
|
ssleep(5);
|
|
|
|
} else {
|
|
|
|
/* no device left, repeat fast */
|
|
|
|
msleep(500);
|
|
|
|
}
|
|
|
|
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (rc) {
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++)
|
|
|
|
ata_dev_disable(&ap->device[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
DPRINTK("EXIT, rc=%d\n", rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_finish - finish up EH
|
|
|
|
* @ap: host port to finish EH for
|
|
|
|
*
|
|
|
|
* Recovery is complete. Clean up EH states and retry or finish
|
|
|
|
* failed qcs.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* None.
|
|
|
|
*/
|
|
|
|
static void ata_eh_finish(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
int tag;
|
|
|
|
|
|
|
|
/* retry or finish qcs */
|
|
|
|
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
|
|
|
|
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
|
|
|
|
|
|
|
|
if (!(qc->flags & ATA_QCFLAG_FAILED))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qc->err_mask) {
|
|
|
|
/* FIXME: Once EH migration is complete,
|
|
|
|
* generate sense data in this function,
|
|
|
|
* considering both err_mask and tf.
|
|
|
|
*/
|
|
|
|
if (qc->err_mask & AC_ERR_INVALID)
|
|
|
|
ata_eh_qc_complete(qc);
|
|
|
|
else
|
|
|
|
ata_eh_qc_retry(qc);
|
|
|
|
} else {
|
|
|
|
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
|
|
|
|
ata_eh_qc_complete(qc);
|
|
|
|
} else {
|
|
|
|
/* feed zero TF to sense generation */
|
|
|
|
memset(&qc->result_tf, 0, sizeof(qc->result_tf));
|
|
|
|
ata_eh_qc_retry(qc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_do_eh - do standard error handling
|
|
|
|
* @ap: host port to handle error for
|
2006-05-31 17:27:48 +08:00
|
|
|
* @prereset: prereset method (can be NULL)
|
2006-05-15 19:58:22 +08:00
|
|
|
* @softreset: softreset method (can be NULL)
|
|
|
|
* @hardreset: hardreset method (can be NULL)
|
|
|
|
* @postreset: postreset method (can be NULL)
|
|
|
|
*
|
|
|
|
* Perform standard error handling sequence.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*/
|
2006-05-31 17:27:48 +08:00
|
|
|
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
|
|
|
|
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
|
|
|
|
ata_postreset_fn_t postreset)
|
2006-05-15 19:58:22 +08:00
|
|
|
{
|
2006-07-03 15:07:26 +08:00
|
|
|
ata_eh_autopsy(ap);
|
|
|
|
ata_eh_report(ap);
|
2006-05-31 17:27:48 +08:00
|
|
|
ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
|
2006-05-15 19:58:22 +08:00
|
|
|
ata_eh_finish(ap);
|
|
|
|
}
|
2006-07-03 15:07:27 +08:00
|
|
|
|
2007-03-02 16:32:47 +08:00
|
|
|
#ifdef CONFIG_PM
|
2006-07-03 15:07:27 +08:00
|
|
|
/**
|
|
|
|
* ata_eh_handle_port_suspend - perform port suspend operation
|
|
|
|
* @ap: port to suspend
|
|
|
|
*
|
|
|
|
* Suspend @ap.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*/
|
|
|
|
static void ata_eh_handle_port_suspend(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
/* are we suspending? */
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
|
|
|
|
ap->pm_mesg.event == PM_EVENT_ON) {
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
|
|
|
|
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
|
|
|
|
|
|
|
|
/* suspend */
|
|
|
|
ata_eh_freeze_port(ap);
|
|
|
|
|
|
|
|
if (ap->ops->port_suspend)
|
|
|
|
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
|
|
|
|
|
|
|
|
/* report result */
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
|
|
|
|
ap->pflags &= ~ATA_PFLAG_PM_PENDING;
|
|
|
|
if (rc == 0)
|
|
|
|
ap->pflags |= ATA_PFLAG_SUSPENDED;
|
|
|
|
else
|
|
|
|
ata_port_schedule_eh(ap);
|
|
|
|
|
|
|
|
if (ap->pm_result) {
|
|
|
|
*ap->pm_result = rc;
|
|
|
|
ap->pm_result = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ata_eh_handle_port_resume - perform port resume operation
|
|
|
|
* @ap: port to resume
|
|
|
|
*
|
|
|
|
* Resume @ap.
|
|
|
|
*
|
|
|
|
* This function also waits upto one second until all devices
|
|
|
|
* hanging off this port requests resume EH action. This is to
|
|
|
|
* prevent invoking EH and thus reset multiple times on resume.
|
|
|
|
*
|
|
|
|
* On DPM resume, where some of devices might not be resumed
|
|
|
|
* together, this may delay port resume upto one second, but such
|
|
|
|
* DPM resumes are rare and 1 sec delay isn't too bad.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* Kernel thread context (may sleep).
|
|
|
|
*/
|
|
|
|
static void ata_eh_handle_port_resume(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
unsigned long timeout;
|
|
|
|
unsigned long flags;
|
|
|
|
int i, rc = 0;
|
|
|
|
|
|
|
|
/* are we resuming? */
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
|
|
|
|
ap->pm_mesg.event != PM_EVENT_ON) {
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
|
|
|
|
/* spurious? */
|
|
|
|
if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
if (ap->ops->port_resume)
|
|
|
|
rc = ap->ops->port_resume(ap);
|
|
|
|
|
|
|
|
/* give devices time to request EH */
|
|
|
|
timeout = jiffies + HZ; /* 1s max */
|
|
|
|
while (1) {
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
struct ata_device *dev = &ap->device[i];
|
|
|
|
unsigned int action = ata_eh_dev_action(dev);
|
|
|
|
|
|
|
|
if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
|
|
|
|
!(action & ATA_EH_RESUME))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
|
|
|
|
break;
|
|
|
|
msleep(10);
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
spin_lock_irqsave(ap->lock, flags);
|
|
|
|
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
|
|
|
|
if (ap->pm_result) {
|
|
|
|
*ap->pm_result = rc;
|
|
|
|
ap->pm_result = NULL;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(ap->lock, flags);
|
|
|
|
}
|
2007-03-02 16:32:47 +08:00
|
|
|
#endif /* CONFIG_PM */
|