linux-next/drivers/mmc/core/mmc_ops.c
Ulf Hansson b9ec26160f mmc: core: Fixup busy detection for mmc switch operations
If the host controller supports busy detection in HW, we expect
MMC_CAP_WAIT_WHILE_BUSY to be set. Likewise, the corresponding
host->max_busy_timeout should reflect the maximum busy detection
timeout supported by the host.
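
A host driver advertising this capability would do something along these
lines (a hypothetical sketch; the actual timeout value depends on the
controller):

	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
	mmc->max_busy_timeout = 4000;	/* longest HW busy wait, in ms */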

Previously we expected a host that supported MMC_CAP_WAIT_WHILE_BUSY to
cope with any timeout, which just isn't feasible due to HW limitations.

For most switch operations, R1B responses are expected, and thus we need
to check for busy detection completion. To cope with cases where the
requested busy detection timeout is greater than what the host is able
to support, we fall back to using an R1 response instead. This prevents
the host from doing HW busy detection.

In those cases, busy detection completion is handled by polling for the
card's status using CMD13. This is the same mechanism used when the host
doesn't support MMC_CAP_WAIT_WHILE_BUSY.

Do note, a host->max_busy_timeout set to zero is interpreted by the mmc
core as meaning it doesn't know what the host supports. It will then
provide the host with whatever timeout the mmc core finds suitable.

For some cases the mmc core unfortunately has no clue of what timeout to
use. In these cases we provide the host with a timeout value of zero,
which the host may interpret as "use whatever timeout it finds suitable".
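
The resulting selection of response type can be sketched as follows (an
illustrative extract mirroring the logic in __mmc_switch() below):

	bool use_r1b_resp = use_busy_signal;

	/*
	 * A requested timeout beyond what the host supports rules out HW
	 * busy detection: use an R1 rather than an R1B response, and detect
	 * busy completion by polling the card with CMD13 instead.
	 */
	if (timeout_ms && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;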

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Chris Ball <chris@printf.net>
2014-02-23 10:40:38 -05:00


/*
 *  linux/drivers/mmc/core/mmc_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include "core.h"
#include "mmc_ops.h"
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
bool ignore_crc)
{
int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
BUG_ON(!card->host);
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
if (ignore_crc)
cmd.flags &= ~MMC_RSP_CRC;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
/* NOTE: callers are required to understand the difference
* between "native" and SPI format status words!
*/
if (status)
*status = cmd.resp[0];
return 0;
}
int mmc_send_status(struct mmc_card *card, u32 *status)
{
return __mmc_send_status(card, status, false);
}
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
int err;
struct mmc_command cmd = {0};
BUG_ON(!host);
cmd.opcode = MMC_SELECT_CARD;
if (card) {
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
} else {
cmd.arg = 0;
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
}
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
return 0;
}
int mmc_select_card(struct mmc_card *card)
{
BUG_ON(!card);
return _mmc_select_card(card->host, card);
}
int mmc_deselect_cards(struct mmc_host *host)
{
return _mmc_select_card(host, NULL);
}
int mmc_go_idle(struct mmc_host *host)
{
int err;
struct mmc_command cmd = {0};
/*
* Non-SPI hosts need to prevent chipselect going active during
* GO_IDLE; that would put chips into SPI mode. Remind them of
* that in case of hardware that won't pull up DAT3/nCS otherwise.
*
* SPI hosts ignore ios.chip_select; it's managed according to
* rules that must accommodate non-MMC slaves which this layer
* won't even know about.
*/
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_HIGH);
mmc_delay(1);
}
cmd.opcode = MMC_GO_IDLE_STATE;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
err = mmc_wait_for_cmd(host, &cmd, 0);
mmc_delay(1);
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_DONTCARE);
mmc_delay(1);
}
host->use_spi_crc = 0;
return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {0};
int i, err = 0;
BUG_ON(!host);
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
for (i = 100; i; i--) {
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (mmc_host_is_spi(host)) {
if (!(cmd.resp[0] & R1_SPI_IDLE))
break;
} else {
if (cmd.resp[0] & MMC_CARD_BUSY)
break;
}
err = -ETIMEDOUT;
mmc_delay(10);
}
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
return err;
}
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
int err;
struct mmc_command cmd = {0};
BUG_ON(!host);
BUG_ON(!cid);
cmd.opcode = MMC_ALL_SEND_CID;
cmd.arg = 0;
cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
memcpy(cid, cmd.resp, sizeof(u32) * 4);
return 0;
}
int mmc_set_relative_addr(struct mmc_card *card)
{
int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
BUG_ON(!card->host);
cmd.opcode = MMC_SET_RELATIVE_ADDR;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
return 0;
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
int err;
struct mmc_command cmd = {0};
BUG_ON(!host);
BUG_ON(!cxd);
cmd.opcode = opcode;
cmd.arg = arg;
cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
memcpy(cxd, cmd.resp, sizeof(u32) * 4);
return 0;
}
/*
 * NOTE: void *buf, the caller is required to pass either a DMA-capable
 * buffer or an on-stack buffer (handled with some copying overhead in
 * the callee).
*/
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
u32 opcode, void *buf, unsigned len)
{
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
void *data_buf;
int is_on_stack;
is_on_stack = object_is_on_stack(buf);
if (is_on_stack) {
/*
* dma onto stack is unsafe/nonportable, but callers to this
* routine normally provide temporary on-stack buffers ...
*/
data_buf = kmalloc(len, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
} else
data_buf = buf;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.arg = 0;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = len;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, data_buf, len);
if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
/*
	 * The spec states that CSD and CID accesses have a timeout
* of 64 clock cycles.
*/
data.timeout_ns = 0;
data.timeout_clks = 64;
} else
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(host, &mrq);
if (is_on_stack) {
memcpy(buf, data_buf, len);
kfree(data_buf);
}
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return 0;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
int ret, i;
u32 *csd_tmp;
if (!mmc_host_is_spi(card->host))
return mmc_send_cxd_native(card->host, card->rca << 16,
csd, MMC_SEND_CSD);
csd_tmp = kmalloc(16, GFP_KERNEL);
if (!csd_tmp)
return -ENOMEM;
ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
if (ret)
goto err;
	for (i = 0; i < 4; i++)
csd[i] = be32_to_cpu(csd_tmp[i]);
err:
kfree(csd_tmp);
return ret;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
int ret, i;
u32 *cid_tmp;
if (!mmc_host_is_spi(host)) {
if (!host->card)
return -EINVAL;
return mmc_send_cxd_native(host, host->card->rca << 16,
cid, MMC_SEND_CID);
}
cid_tmp = kmalloc(16, GFP_KERNEL);
if (!cid_tmp)
return -ENOMEM;
ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
if (ret)
goto err;
	for (i = 0; i < 4; i++)
cid[i] = be32_to_cpu(cid_tmp[i]);
err:
kfree(cid_tmp);
return ret;
}
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
struct mmc_command cmd = {0};
int err;
cmd.opcode = MMC_SPI_READ_OCR;
cmd.arg = highcap ? (1 << 30) : 0;
cmd.flags = MMC_RSP_SPI_R3;
err = mmc_wait_for_cmd(host, &cmd, 0);
*ocrp = cmd.resp[1];
return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
struct mmc_command cmd = {0};
int err;
cmd.opcode = MMC_SPI_CRC_ON_OFF;
cmd.flags = MMC_RSP_SPI_R1;
cmd.arg = use_crc;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (!err)
host->use_spi_crc = use_crc;
return err;
}
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
* @index: EXT_CSD register index
* @value: value to program into EXT_CSD register
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @use_busy_signal: use the busy signal as response type
* @send_status: send status cmd to poll for busy
* @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, bool use_busy_signal, bool send_status,
bool ignore_crc)
{
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {0};
unsigned long timeout;
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
/*
* If the cmd timeout and the max_busy_timeout of the host are both
* specified, let's validate them. A failure means we need to prevent
* the host from doing hw busy detection, which is done by converting
* to a R1 response instead of a R1B.
*/
if (timeout_ms && host->max_busy_timeout &&
(timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
/*
* A busy_timeout of zero means the host can decide to use
* whatever value it finds suitable.
*/
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
}
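	/* Sanitize operations may keep the card busy for a very long time. */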
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
/* No need to check card status in case of unblocking command */
if (!use_busy_signal)
return 0;
/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to poll
* to detect busy completion.
*/
if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
ignore_crc = false;
/* We have an unspecified cmd timeout, use the fallback value. */
if (!timeout_ms)
timeout_ms = MMC_OPS_TIMEOUT_MS;
/* Must check status to be sure of no errors. */
timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (send_status) {
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
return err;
}
if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
break;
if (mmc_host_is_spi(host))
break;
/*
		 * If we are not allowed to issue a status command and the host
		 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only rely on
		 * waiting for the stated timeout to be sufficient.
*/
if (!send_status) {
mmc_delay(timeout_ms);
return 0;
}
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(host), __func__);
return -ETIMEDOUT;
}
} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
if (mmc_host_is_spi(host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
return -EBADMSG;
} else {
if (status & 0xFDFFA000)
pr_warn("%s: unexpected status %#x after switch\n",
mmc_hostname(host), status);
if (status & R1_SWITCH_ERROR)
return -EBADMSG;
}
return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
return __mmc_switch(card, set, index, value, timeout_ms, true, true,
false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
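/*
 * Example usage (illustrative only, not part of this file): callers such as
 * the eMMC init code typically switch the bus width along these lines,
 * leaving all busy-detection handling to __mmc_switch():
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
 */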
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg;
u8 *data_buf;
u8 *test_buf;
int i, err;
static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
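	/* The card returns the test pattern inverted; the XOR check below expects 0xff. */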
/* dma onto stack is unsafe/nonportable, but callers to this
* routine normally provide temporary on-stack buffers ...
*/
data_buf = kmalloc(len, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
if (len == 8)
test_buf = testdata_8bit;
else if (len == 4)
test_buf = testdata_4bit;
else {
pr_err("%s: Invalid bus_width %d\n",
mmc_hostname(host), len);
kfree(data_buf);
return -EINVAL;
}
if (opcode == MMC_BUS_TEST_W)
memcpy(data_buf, test_buf, len);
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.arg = 0;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = len;
data.blocks = 1;
if (opcode == MMC_BUS_TEST_R)
data.flags = MMC_DATA_READ;
else
data.flags = MMC_DATA_WRITE;
data.sg = &sg;
data.sg_len = 1;
mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
if (opcode == MMC_BUS_TEST_R) {
for (i = 0; i < len / 4; i++)
if ((test_buf[i] ^ data_buf[i]) != 0xff) {
err = -EIO;
break;
}
}
kfree(data_buf);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
int err, width;
if (bus_width == MMC_BUS_WIDTH_8)
width = 8;
else if (bus_width == MMC_BUS_WIDTH_4)
width = 4;
else if (bus_width == MMC_BUS_WIDTH_1)
return 0; /* no need for test */
else
return -EINVAL;
/*
* Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
* is a problem. This improves chances that the test will work.
*/
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
return err;
}
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
struct mmc_command cmd = {0};
unsigned int opcode;
int err;
if (!card->ext_csd.hpi) {
		pr_warning("%s: Card doesn't support HPI command\n",
mmc_hostname(card->host));
return -EINVAL;
}
opcode = card->ext_csd.hpi_cmd;
if (opcode == MMC_STOP_TRANSMISSION)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
else if (opcode == MMC_SEND_STATUS)
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
cmd.opcode = opcode;
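	/* Argument: RCA in bits [31:16], with the HPI bit set in bit [0]. */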
cmd.arg = card->rca << 16 | 1;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_warn("%s: error %d interrupting operation. "
"HPI command response %#x\n", mmc_hostname(card->host),
err, cmd.resp[0]);
return err;
}
if (status)
*status = cmd.resp[0];
return 0;
}