/*
 * Driver for the Octeon bootbus compact flash.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 - 2012 Cavium Inc.
 * Copyright (C) 2008 Wind River Systems
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>

#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>

/*
 * The Octeon bootbus compact flash interface is connected in at least
 * 3 different configurations on various evaluation boards:
 *
 * -- 8  bits no irq, no DMA
 * -- 16 bits no irq, no DMA
 * -- 16 bits True IDE mode with DMA, but no irq.
 *
 * In the last case the DMA engine can generate an interrupt when the
 * transfer is complete. For the first two cases only PIO is supported.
 */

#define DRV_NAME	"pata_octeon_cf"
#define DRV_VERSION	"2.2"

/* Poll interval in nS. */
#define OCTEON_CF_BUSY_POLL_INTERVAL 500000

#define DMA_CFG 0
#define DMA_TIM 0x20
#define DMA_INT 0x38
#define DMA_INT_EN 0x50

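/*
 * 500000 nS is 500 us; octeon_cf_interrupt() re-arms the not-busy poll
 * timer with this value as the delay and one fifth of it (100 us) as the
 * hrtimer slack.
 */
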
struct octeon_cf_port {
	struct hrtimer delayed_finish;
	struct ata_port *ap;
	int dma_finished;
	void *c0;
	unsigned int cs0;
	unsigned int cs1;
	bool is_true_ide;
	u64 dma_base;
};

static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

static int enable_dma;
module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
		 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");

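/*
 * For example (assuming the default module name), DMA can be enabled with
 * "modprobe pata_octeon_cf enable_dma=1", or with
 * "pata_octeon_cf.enable_dma=1" on the kernel command line when the
 * driver is built in.
 */
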
/**
 * Convert a nanosecond based time to the setting used in the
 * boot bus timing register, based on the timing multiplier.
 */
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
	unsigned int val;

	/*
	 * Compute # of eclock periods to get desired duration in
	 * nanoseconds.
	 */
	val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
			   1000 * tim_mult);

	return val;
}

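/*
 * Worked example with a hypothetical 800 MHz I/O clock and tim_mult = 4:
 * 25 nS becomes DIV_ROUND_UP(25 * 800, 1000 * 4) = 5 periods of the
 * divided (200 MHz) clock.
 */
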
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;
	unsigned int tim_mult;

	switch (multiplier) {
	case 8:
		tim_mult = 3;
		break;
	case 4:
		tim_mult = 0;
		break;
	case 2:
		tim_mult = 2;
		break;
	default:
		tim_mult = 1;
		break;
	}

	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}

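/*
 * For reference, the encoding used by the switch above: the hardware
 * tim_mult field values 0, 1, 2 and 3 select multipliers of 4, 1, 2 and 8
 * eclocks respectively, so a requested multiplier of 4 maps to 0, 2 to 2,
 * 8 to 3, and anything else falls back to 1x.
 */
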
/**
 * Called after libata determines the needed PIO mode. This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap:  ATA port information
 * @dev: ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int T;
	struct ata_timing timing;

	unsigned int div;
	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t2;

	/*
	 * A divisor value of four will overflow the timing fields at
	 * clock rates greater than 800MHz
	 */
	if (octeon_get_io_clock_rate() <= 800000000)
		div = 4;
	else
		div = 8;
	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());

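	/*
	 * octeon_get_io_clock_rate() is in Hz, so T above works out to the
	 * length of "div" I/O clock cycles expressed in picoseconds; the
	 * timings computed below therefore come out quantized in units of
	 * "div" eclocks, matching the multiplier programmed into the
	 * bootbus region further down.
	 */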
	BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));

	t2 = timing.active;
	if (t2)
		t2--;

	trh = ns_to_tim_reg(div, 20);
	if (trh)
		trh--;

	pause = (int)timing.cycle - (int)timing.active -
		(int)timing.setup - trh;
	if (pause < 0)
		pause = 0;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(div, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
			       reg_tim.u64);
}

static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_pin_defs pin_defs;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;
	int c;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0 = timing->cycle;
	Td = timing->active;
	Tkr = timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_io_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);

	/* DMA channel number. */
	c = (cf_port->dma_base & 8) >> 3;

	/* Invert the polarity if the default is 0 */
	dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI; the C.F. spec says 0, but some Sony CF cards require
	 * more, so we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}

/**
 * Handle an 8 bit I/O request.
 *
 * @qc:     Queued command
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */
static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
					 unsigned char *buffer,
					 unsigned int buflen,
					 int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite8(*buffer, data_addr);
			buffer++;
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		ioread8_rep(data_addr, buffer, words);
	}
	return buflen;
}

/**
 * Handle a 16 bit I/O request.
 *
 * @qc:     Queued command
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw:     True to write.
 */
static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}
	return buflen;
}

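/*
 * In the 16 bit non-True IDE hookup the taskfile registers are accessed a
 * pair at a time: each 16 bit word at data_addr + 2, + 4 and + 6 carries
 * two adjacent 8 bit registers (low byte first, e.g. device in the low
 * byte and command/status in the high byte at + 6), while the word at
 * + 0xc carries error/feature in its high byte.  The *16 helpers below
 * pack and unpack the taskfile accordingly.
 */
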
/**
 * Read the taskfile for 16bit non-True IDE only.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);
	tf->feature = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->command = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}

static u8 octeon_cf_check_status16(struct ata_port *ap)
{
	u16 blob;
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 6);
	return blob >> 8;
}

static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 * Load the taskfile for 16bit non-True IDE only.  The device_addr is
 * not loaded, we do this as part of octeon_cf_exec_command16.
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}

static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
{
	/* There is only one device, do nothing. */
	return;
}

/*
 * Issue ATA command to host controller.  The device_addr is also sent
 * as it must be written in a combined write with the command.
 */
static void octeon_cf_exec_command16(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;
	u16 blob;

	if (tf->flags & ATA_TFLAG_DEVICE) {
		VPRINTK("device 0x%X\n", tf->device);
		blob = tf->device;
	} else {
		blob = 0;
	}

	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
	blob |= (tf->command << 8);
	__raw_writew(blob, base + 6);

	ata_wait_idle(ap);
}

static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}

static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct octeon_cf_port *cf_port;

	cf_port = ap->private_data;
	DPRINTK("ENTER\n");
	/* issue r/w command */
	qc->cursg = qc->sg;
	cf_port->dma_finished = 0;
	ap->ops->sff_exec_command(ap, &qc->tf);
	DPRINTK("EXIT\n");
}

/**
 * Start a DMA transfer that was already setup
 *
 * @qc:     Information about the DMA
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_port *cf_port = qc->ap->private_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
#ifdef __LITTLE_ENDIAN
	mio_boot_dma_cfg.s.endian = 1;
#endif
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ. Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors. Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it. If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}

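/*
 * Example of the minus-one size encoding used above: a single 512 byte
 * sector is 256 sixteen-bit words, so the size field is programmed
 * with 255.
 */
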
/**
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}

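/*
 * Note on the completion check above: the size field counts 16 bit words
 * in minus-one notation, so a transfer that has fully drained leaves it
 * at 0xfffff (all ones); any other value means the engine stopped early.
 */
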
/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	DPRINTK("ENTER\n");
	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;

		ap = host->ports[i];
		cf_port = ap->private_data;

		dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
		dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
			continue;

		if (dma_int.s.done && !dma_cfg.s.en) {
			if (!sg_is_last(qc->cursg)) {
				qc->cursg = sg_next(qc->cursg);
				handled = 1;
				octeon_cf_dma_start(qc);
				continue;
			} else {
				cf_port->dma_finished = 1;
			}
		}
		if (!cf_port->dma_finished)
			continue;
		status = ioread8(ap->ioaddr.altstatus_addr);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			/*
			 * We are busy, try to handle it later.  This
			 * is the DMA finished interrupt, and it could
			 * take a little while for the card to be
			 * ready for more commands.
			 */
			/* Clear DMA irq. */
			dma_int.u64 = 0;
			dma_int.s.done = 1;
			cvmx_write_csr(cf_port->dma_base + DMA_INT,
				       dma_int.u64);
			hrtimer_start_range_ns(&cf_port->delayed_finish,
					       ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
					       OCTEON_CF_BUSY_POLL_INTERVAL / 5,
					       HRTIMER_MODE_REL);
			handled = 1;
		} else {
			handled |= octeon_cf_dma_finished(ap, qc);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	DPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
	struct octeon_cf_port *cf_port = container_of(hrt,
						      struct octeon_cf_port,
						      delayed_finish);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;
	enum hrtimer_restart rv = HRTIMER_NORESTART;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		hrtimer_forward_now(hrt,
				    ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
		rv = HRTIMER_RESTART;
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rv;
}

static void octeon_cf_dev_config(struct ata_device *dev)
{
	/*
	 * A maximum of 2^20 - 1 16 bit transfers are possible with
	 * the bootbus DMA.  So we need to throttle max_sectors to
	 * (2^12 - 1 == 4095) to assure that this can never happen.
	 */
	dev->max_sectors = min(dev->max_sectors, 4095U);
}

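/*
 * Checking the limit above: 4095 sectors * 512 bytes = 2096640 bytes,
 * i.e. 1048320 sixteen-bit words, which stays below the engine's
 * 2^20 - 1 = 1048575 word maximum.
 */
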
/*
 * We don't do ATAPI DMA so return 0.
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}

static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}

static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_ata_port_noaction,
	.sff_irq_clear		= octeon_cf_ata_port_noaction,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};

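/*
 * The probe below parses a "cavium,ebt3000-compact-flash" node.  An
 * illustrative device tree fragment (exact reg cells depend on the parent
 * bootbus #address-cells/#size-cells) might look roughly like:
 *
 *	compact-flash@5,0 {
 *		compatible = "cavium,ebt3000-compact-flash";
 *		reg = <5 0 0x10000>, <6 0 0x10000>;
 *		cavium,bus-width = <16>;
 *		cavium,true-ide;
 *		cavium,dma-engine-handle = <&dma0>;
 *	};
 *
 * The first cell of each reg entry is treated as the boot bus chip select;
 * "cavium,bus-width" and "cavium,true-ide" select the wiring, and the DMA
 * engine phandle is only used in True IDE mode.
 */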
static int octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;

	bool is_16bit;
	const __be32 *cs_num;
	struct property *reg_prop;
	int n_addr, n_size, reg_len;
	struct device_node *node;
	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;
	int rv = -ENOMEM;
	u32 bus_width;

	node = pdev->dev.of_node;
	if (node == NULL)
		return -EINVAL;

	cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");

	if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
		is_16bit = (bus_width == 16);
	else
		is_16bit = false;

	n_addr = of_n_addr_cells(node);
	n_size = of_n_size_cells(node);

	reg_prop = of_find_property(node, "reg", &reg_len);
	if (!reg_prop || reg_len < sizeof(__be32))
		return -EINVAL;

	cs_num = reg_prop->value;
	cf_port->cs0 = be32_to_cpup(cs_num);

	if (cf_port->is_true_ide) {
		struct device_node *dma_node;
		dma_node = of_parse_phandle(node,
					    "cavium,dma-engine-handle", 0);
		if (dma_node) {
			struct platform_device *dma_dev;
			dma_dev = of_find_device_by_node(dma_node);
			if (dma_dev) {
				struct resource *res_dma;
				int i;
				res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
				if (!res_dma) {
					of_node_put(dma_node);
					return -EINVAL;
				}
				cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
								      resource_size(res_dma));
				if (!cf_port->dma_base) {
					of_node_put(dma_node);
					return -EINVAL;
				}

				irq_handler = octeon_cf_interrupt;
				i = platform_get_irq(dma_dev, 0);
				if (i > 0)
					irq = i;
			}
			of_node_put(dma_node);
		}
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
				   resource_size(res_cs1));
		if (!cs1)
			return rv;

		if (reg_len < (n_addr + n_size + 1) * sizeof(__be32))
			return -EINVAL;

		cs_num += n_addr + n_size;
		cf_port->cs1 = be32_to_cpup(cs_num);
	}

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
			   resource_size(res_cs0));
	if (!cs0)
		return rv;

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return rv;

	ap = host->ports[0];
	ap->private_data = cf_port;
	pdev->dev.platform_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

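	/*
	 * Three register mappings are set up below, matching the wirings
	 * described at the top of the file: 8 bit (standard SFF layout at
	 * cs0 + 0x800), 16 bit True IDE (each 8 bit register on the odd
	 * byte lane at (reg << 1) + 1, the data register at
	 * (ATA_REG_DATA << 1), and control/altstatus on cs1), and 16 bit
	 * non-True IDE (packed accesses handled by the *16 helpers).
	 */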
	if (!is_16bit) {
		base = cs0 + 0x800;
		ap->ioaddr.cmd_addr	= base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr	= base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cf_port->is_true_ide) {
		base = cs0;
		ap->ioaddr.cmd_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr	= base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr	= base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr	= base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr	= base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr	= base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr	= base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr	= base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr	= base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr	= base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr	= cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask	= enable_dma ? ATA_MWDMA4 : 0;

		/* True IDE mode needs a timer to poll for not-busy. */
		hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		cf_port->delayed_finish.function = octeon_cf_delayed_finish;
	} else {
		/* 16 bit but not True IDE */
		base = cs0 + 0x800;
		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
		octeon_cf_ops.softreset		= octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;

		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr	= base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}
	cf_port->c0 = ap->ioaddr.ctl_addr;

	rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv)
		return rv;

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
		 is_16bit ? 16 : 8,
		 cf_port->is_true_ide ? ", True IDE" : "");

	return ata_host_activate(host, irq, irq_handler,
				 IRQF_SHARED, &octeon_cf_sht);
}

static void octeon_cf_shutdown(struct device *dev)
{
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;

	struct octeon_cf_port *cf_port = dev_get_platdata(dev);

	if (cf_port->dma_base) {
		/* Stop and clear the dma engine. */
		dma_cfg.u64 = 0;
		dma_cfg.s.size = -1;
		cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

		/* Disable the interrupt. */
		dma_int.u64 = 0;
		cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

		/* Clear the DMA complete status */
		dma_int.s.done = 1;
		cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

		__raw_writeb(0, cf_port->c0);
		udelay(20);
		__raw_writeb(ATA_SRST, cf_port->c0);
		udelay(20);
		__raw_writeb(0, cf_port->c0);
		mdelay(100);
	}
}

static const struct of_device_id octeon_cf_match[] = {
	{
		.compatible = "cavium,ebt3000-compact-flash",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_cf_match);

static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = octeon_cf_match,
		.shutdown = octeon_cf_shutdown
	},
};

static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}


MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);