/*
* ATAPI support.
*/
#include <linux/kernel.h>
#include <linux/cdrom.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#define DRV_NAME "ide-atapi"
#define PFX DRV_NAME ": "
#ifdef DEBUG
#define debug_log(fmt, args...) \
printk(KERN_INFO "ide: " fmt, ## args)
#else
#define debug_log(fmt, args...) do {} while (0)
#endif
#define ATAPI_MIN_CDB_BYTES 12
static inline int dev_is_idecd(ide_drive_t *drive)
{
return drive->media == ide_cdrom || drive->media == ide_optical;
}
/*
* Check whether we can support a device,
* based on the ATAPI IDENTIFY command results.
*/
int ide_check_atapi_device(ide_drive_t *drive, const char *s)
{
u16 *id = drive->id;
u8 gcw[2], protocol, device_type, removable, drq_type, packet_size;
*((u16 *)&gcw) = id[ATA_ID_CONFIG];
protocol = (gcw[1] & 0xC0) >> 6;
device_type = gcw[1] & 0x1F;
removable = (gcw[0] & 0x80) >> 7;
drq_type = (gcw[0] & 0x60) >> 5;
packet_size = gcw[0] & 0x03;
#ifdef CONFIG_PPC
/* kludge for Apple PowerBook internal zip */
if (drive->media == ide_floppy && device_type == 5 &&
!strstr((char *)&id[ATA_ID_PROD], "CD-ROM") &&
strstr((char *)&id[ATA_ID_PROD], "ZIP"))
device_type = 0;
#endif
if (protocol != 2)
printk(KERN_ERR "%s: %s: protocol (0x%02x) is not ATAPI\n",
s, drive->name, protocol);
else if ((drive->media == ide_floppy && device_type != 0) ||
(drive->media == ide_tape && device_type != 1))
printk(KERN_ERR "%s: %s: invalid device type (0x%02x)\n",
s, drive->name, device_type);
else if (removable == 0)
printk(KERN_ERR "%s: %s: the removable flag is not set\n",
s, drive->name);
else if (drive->media == ide_floppy && drq_type == 3)
printk(KERN_ERR "%s: %s: sorry, DRQ type (0x%02x) not "
"supported\n", s, drive->name, drq_type);
else if (packet_size != 0)
printk(KERN_ERR "%s: %s: packet size (0x%02x) is not 12 "
"bytes\n", s, drive->name, packet_size);
else
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(ide_check_atapi_device);
void ide_init_pc(struct ide_atapi_pc *pc)
{
memset(pc, 0, sizeof(*pc));
}
EXPORT_SYMBOL_GPL(ide_init_pc);
/*
* Add a special packet command request to the tail of the request queue,
* and wait for it to be serviced.
*/
int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct ide_atapi_pc *pc, void *buf, unsigned int bufflen)
{
struct request *rq;
int error;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->special = (char *)pc;
if (buf && bufflen) {
error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
GFP_NOIO);
if (error)
goto put_req;
}
memcpy(rq->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req:
blk_put_request(rq);
return error;
}
EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
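/* Issue a TEST UNIT READY packet command and wait for it to complete. */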
int ide_do_test_unit_ready(ide_drive_t *drive, struct gendisk *disk)
{
struct ide_atapi_pc pc;
ide_init_pc(&pc);
pc.c[0] = TEST_UNIT_READY;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_do_test_unit_ready);
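/*
 * Issue a START STOP UNIT command; 'start' goes into byte 4 of the CDB.
 * Tape drives additionally wait for DSC before the command is considered done.
 */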
int ide_do_start_stop(ide_drive_t *drive, struct gendisk *disk, int start)
{
struct ide_atapi_pc pc;
ide_init_pc(&pc);
pc.c[0] = START_STOP;
pc.c[4] = start;
if (drive->media == ide_tape)
pc.flags |= PC_FLAG_WAIT_FOR_DSC;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_do_start_stop);
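/*
 * Lock or unlock the medium door via PREVENT ALLOW MEDIUM REMOVAL,
 * provided the device supports door locking.
 */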
int ide_set_media_lock(ide_drive_t *drive, struct gendisk *disk, int on)
{
struct ide_atapi_pc pc;
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
return 0;
ide_init_pc(&pc);
pc.c[0] = ALLOW_MEDIUM_REMOVAL;
pc.c[4] = on;
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_set_media_lock);
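/*
 * Build a REQUEST SENSE packet command; the allocation length and the
 * expected transfer size depend on the media type.
 */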
void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
ide_init_pc(pc);
pc->c[0] = REQUEST_SENSE;
if (drive->media == ide_floppy) {
pc->c[4] = 255;
pc->req_xfer = 18;
} else {
pc->c[4] = 20;
pc->req_xfer = 20;
}
}
EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
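/*
 * Prepare the per-drive sense request and map the sense buffer into it.
 * A mapping failure is deferred: the request stays unarmed and the error
 * is reported when ide_queue_sense_rq() later tries to queue it.
 */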
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
struct request *sense_rq = &drive->sense_rq;
unsigned int cmd_len, sense_len;
int err;
switch (drive->media) {
case ide_floppy:
cmd_len = 255;
sense_len = 18;
break;
case ide_tape:
cmd_len = 20;
sense_len = 20;
break;
default:
cmd_len = 18;
sense_len = 18;
}
BUG_ON(sense_len > sizeof(*sense));
if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
return;
memset(sense, 0, sizeof(*sense));
blk_rq_init(rq->q, sense_rq);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO);
if (unlikely(err)) {
if (printk_ratelimit())
printk(KERN_WARNING PFX "%s: failed to map sense "
"buffer\n", drive->name);
return;
}
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_SENSE;
sense_rq->cmd_flags |= REQ_PREEMPT;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
drive->sense_rq_armed = true;
}
EXPORT_SYMBOL_GPL(ide_prep_sense);
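/*
 * Queue the previously prepared sense request at the front of the request
 * queue. Fails with -ENOMEM if ide_prep_sense() could not arm it.
 */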
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
/* deferred failure from ide_prep_sense() */
if (!drive->sense_rq_armed) {
printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
drive->name);
return -ENOMEM;
}
drive->sense_rq.special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
/*
* Called when an error was detected during the last packet command.
* We queue a request sense packet command at the head of the request
* queue.
*/
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
(void)ide_read_error(drive);
/* init pc from sense_rq */
ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12);
if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
/*
* Push back the failed request and put request sense on top
* of it. The failed command will be retried after sense data
* is acquired.
*/
drive->hwif->rq = NULL;
ide_requeue_and_plug(drive, failed_rq);
if (ide_queue_sense_rq(drive, pc)) {
blk_start_request(failed_rq);
ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
}
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
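/*
 * Expiry handler: decide how much longer to keep polling a packet command
 * that has not completed within its initial timeout.
 */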
int ide_cd_expiry(ide_drive_t *drive)
{
struct request *rq = drive->hwif->rq;
unsigned long wait = 0;
debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
/*
* Some commands are *slow* and normally take a long time to complete.
* Usually we can use the ATAPI "disconnect" to bypass this, but not all
* commands/drives support that. Let ide_timer_expiry keep polling us
* for these.
*/
switch (rq->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
case GPCMD_CLOSE_TRACK:
case GPCMD_FLUSH_CACHE:
wait = ATAPI_WAIT_PC;
break;
default:
if (!(rq->cmd_flags & REQ_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
wait = 0;
break;
}
return wait;
}
EXPORT_SYMBOL_GPL(ide_cd_expiry);
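/* Determine the transfer length to announce in the PACKET command taskfile. */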
int ide_cd_get_xferlen(struct request *rq)
{
switch (rq->cmd_type) {
case REQ_TYPE_FS:
return 32768;
case REQ_TYPE_SENSE:
case REQ_TYPE_BLOCK_PC:
case REQ_TYPE_ATA_PC:
return blk_rq_bytes(rq);
default:
return 0;
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
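/* Read the byte count (LBAM/LBAH) and interrupt reason (NSECT) registers. */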
void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
{
struct ide_taskfile tf;
drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT |
IDE_VALID_LBAM | IDE_VALID_LBAH);
*bcount = (tf.lbah << 8) | tf.lbam;
*ireason = tf.nsect & 3;
}
EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
/*
* Check the contents of the interrupt reason register and attempt to recover if
* there are problems.
*
* Returns:
* - 0 if everything's ok
* - 1 if the request has to be terminated.
*/
int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
int ireason, int rw)
{
ide_hwif_t *hwif = drive->hwif;
debug_log("ireason: 0x%x, rw: 0x%x\n", ireason, rw);
if (ireason == (!rw << 1))
return 0;
else if (ireason == (rw << 1)) {
printk(KERN_ERR PFX "%s: %s: wrong transfer direction!\n",
drive->name, __func__);
if (dev_is_idecd(drive))
ide_pad_transfer(drive, rw, len);
} else if (!rw && ireason == ATAPI_COD) {
if (dev_is_idecd(drive)) {
/*
* Some drives (ASUS) seem to tell us that status info
* is available. Just get it and ignore.
*/
(void)hwif->tp_ops->read_status(hwif);
return 0;
}
} else {
if (ireason & ATAPI_COD)
printk(KERN_ERR PFX "%s: CoD != 0 in %s\n", drive->name,
__func__);
/* drive wants a command packet, or invalid ireason... */
printk(KERN_ERR PFX "%s: %s: bad interrupt reason 0x%02x\n",
drive->name, __func__, ireason);
}
if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
rq->cmd_flags |= REQ_FAILED;
return 1;
}
EXPORT_SYMBOL_GPL(ide_check_ireason);
/*
* This is the usual interrupt handler which will be called during a packet
* command. We will transfer some of the data (as requested by the drive)
* and will re-point interrupt handler to us.
*/
static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
{
struct ide_atapi_pc *pc = drive->pc;
ide_hwif_t *hwif = drive->hwif;
struct ide_cmd *cmd = &hwif->cmd;
struct request *rq = hwif->rq;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
unsigned int timeout, done;
u16 bcount;
u8 stat, ireason, dsc = 0;
u8 write = !!(pc->flags & PC_FLAG_WRITING);
debug_log("Enter %s - interrupt handler\n", __func__);
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
/* Clear the interrupt */
stat = tp_ops->read_status(hwif);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
int rc;
drive->waiting_for_dma = 0;
rc = hwif->dma_ops->dma_end(drive);
ide_dma_unmap_sg(drive, cmd);
if (rc || (drive->media == ide_tape && (stat & ATA_ERR))) {
if (drive->media == ide_floppy)
printk(KERN_ERR PFX "%s: DMA %s error\n",
drive->name, rq_data_dir(pc->rq)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
} else
rq->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name);
}
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
debug_log("Packet command completed, %d bytes transferred\n",
blk_rq_bytes(rq));
pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
local_irq_enable_in_hardirq();
if (drive->media == ide_tape &&
(stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR;
if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
/* Error detected */
debug_log("%s: I/O error\n", drive->name);
if (drive->media != ide_tape)
pc->rq->errors++;
if (rq->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name);
return ide_do_reset(drive);
}
debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
/* Retry operation */
ide_retry_pc(drive);
/* queued, but not started */
return ide_stopped;
}
pc->error = 0;
if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
dsc = 1;
/*
* ->pc_callback() might change rq->data_len for
* residual count, cache total length.
*/
done = blk_rq_bytes(rq);
/* Command finished - Call the callback function */
uptodate = drive->pc_callback(drive, dsc);
if (uptodate == 0)
drive->failed_pc = NULL;
if (rq->cmd_type == REQ_TYPE_SPECIAL) {
rq->errors = 0;
error = 0;
} else {
if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
error = uptodate ? 0 : -EIO;
}
ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
printk(KERN_ERR PFX "%s: The device wants to issue more "
"interrupts in DMA mode\n", drive->name);
ide_dma_off(drive);
return ide_do_reset(drive);
}
/* Get the number of bytes to transfer on this interrupt. */
ide_read_bcount_and_ireason(drive, &bcount, &ireason);
if (ide_check_ireason(drive, rq, bcount, ireason, write))
return ide_do_reset(drive);
done = min_t(unsigned int, bcount, cmd->nleft);
ide_pio_bytes(drive, cmd, write, done);
/* Update transferred byte count */
rq->resid_len -= done;
bcount -= done;
if (bcount)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
rq->cmd[0], done, bcount, rq->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
return ide_started;
}
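/*
 * Fill in the taskfile for an ATA PACKET command: the byte count goes
 * into LBAM/LBAH and the feature register selects PIO or DMA.
 */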
static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf,
u16 bcount, u8 dma)
{
cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO;
cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM |
IDE_VALID_FEATURE | valid_tf;
cmd->tf.command = ATA_CMD_PACKET;
cmd->tf.feature = dma; /* Use PIO/DMA */
cmd->tf.lbam = bcount & 0xff;
cmd->tf.lbah = (bcount >> 8) & 0xff;
}
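/* Read the interrupt reason bits (CoD/IO) from the sector count register. */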
static u8 ide_read_ireason(ide_drive_t *drive)
{
struct ide_taskfile tf;
drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT);
return tf.nsect & 3;
}
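/*
 * Wait for the drive to signal "command packet expected" (CoD=1, IO=0),
 * retrying for up to ~10 ms before giving up and forcing the bits.
 */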
static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
{
int retries = 100;
while (retries-- && ((ireason & ATAPI_COD) == 0 ||
(ireason & ATAPI_IO))) {
printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing "
"a packet command, retrying\n", drive->name);
udelay(100);
ireason = ide_read_ireason(drive);
if (retries == 0) {
printk(KERN_ERR PFX "%s: (IO,CoD != (0,1) while issuing"
" a packet command, ignoring\n",
drive->name);
ireason |= ATAPI_COD;
ireason &= ~ATAPI_IO;
}
}
return ireason;
}
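/*
 * Delayed packet transfer, used as the expiry handler for devices flagged
 * IDE_AFLAG_ZIP_DRIVE that need extra time before accepting the packet.
 */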
static int ide_delayed_transfer_pc(ide_drive_t *drive)
{
/* Send the actual packet */
drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12);
/* Timeout for the packet command */
return WAIT_FLOPPY_CMD;
}
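/*
 * Wait for DRQ, verify the interrupt reason, send the CDB and start DMA
 * if it was set up. Called either directly from ide_issue_pc() or as the
 * interrupt handler when the device signals packet readiness via DRQ.
 */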
static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
{
struct ide_atapi_pc *uninitialized_var(pc);
ide_hwif_t *hwif = drive->hwif;
struct request *rq = hwif->rq;
ide_expiry_t *expiry;
unsigned int timeout;
int cmd_len;
ide_startstop_t startstop;
u8 ireason;
if (ide_wait_stat(&startstop, drive, ATA_DRQ, ATA_BUSY, WAIT_READY)) {
printk(KERN_ERR PFX "%s: Strange, packet command initiated yet "
"DRQ isn't asserted\n", drive->name);
return startstop;
}
if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
if (drive->dma)
drive->waiting_for_dma = 1;
}
if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */
cmd_len = COMMAND_SIZE(rq->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;
timeout = rq->timeout;
expiry = ide_cd_expiry;
} else {
pc = drive->pc;
cmd_len = ATAPI_MIN_CDB_BYTES;
/*
* If necessary schedule the packet transfer to occur 'timeout'
* milliseconds later in ide_delayed_transfer_pc() after the
* device says it's ready for a packet.
*/
if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
timeout = drive->pc_delay;
expiry = &ide_delayed_transfer_pc;
} else {
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
expiry = NULL;
}
ireason = ide_read_ireason(drive);
if (drive->media == ide_tape)
ireason = ide_wait_ireason(drive, ireason);
if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) {
printk(KERN_ERR PFX "%s: (IO,CoD) != (0,1) while "
"issuing a packet command\n", drive->name);
return ide_do_reset(drive);
}
}
hwif->expiry = expiry;
/* Set the interrupt routine */
ide_set_handler(drive,
(dev_is_idecd(drive) ? drive->irq_handler
: ide_pc_intr),
timeout);
/* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
/* Begin DMA, if necessary */
if (dev_is_idecd(drive)) {
if (drive->dma)
hwif->dma_ops->dma_start(drive);
} else {
if (pc->flags & PC_FLAG_DMA_OK) {
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
hwif->dma_ops->dma_start(drive);
}
}
return ide_started;
}
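/*
 * Issue an ATA PACKET command for the current request: program the
 * taskfile, then either wait for the DRQ interrupt or transfer the
 * packet immediately, depending on the device's DRQ handling.
 */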
ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
{
struct ide_atapi_pc *pc;
ide_hwif_t *hwif = drive->hwif;
ide_expiry_t *expiry = NULL;
struct request *rq = hwif->rq;
unsigned int timeout, bytes;
u16 bcount;
u8 valid_tf;
u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT);
if (dev_is_idecd(drive)) {
valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL;
bcount = ide_cd_get_xferlen(rq);
expiry = ide_cd_expiry;
timeout = ATAPI_WAIT_PC;
if (drive->dma)
drive->dma = !ide_dma_prepare(drive, cmd);
} else {
pc = drive->pc;
valid_tf = IDE_VALID_DEVICE;
bytes = blk_rq_bytes(rq);
bcount = ((drive->media == ide_tape) ? bytes
: min_t(unsigned int,
bytes, 63 * 1024));
/* We haven't transferred any data yet */
rq->resid_len = bcount;
if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR;
ide_dma_off(drive);
}
if (pc->flags & PC_FLAG_DMA_OK)
drive->dma = !ide_dma_prepare(drive, cmd);
if (!drive->dma)
pc->flags &= ~PC_FLAG_DMA_OK;
timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD
: WAIT_TAPE_CMD;
}
ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma);
(void)do_rw_taskfile(drive, cmd);
if (drq_int) {
if (drive->dma)
drive->waiting_for_dma = 0;
hwif->expiry = expiry;
}
ide_execute_command(drive, cmd, ide_transfer_pc, timeout);
return drq_int ? ide_started : ide_transfer_pc(drive);
}
EXPORT_SYMBOL_GPL(ide_issue_pc);