ARM: common: edma: add suspend resume hook

This patch makes the edma driver resume correctly after suspend. Tested
on an AM33xx platform with cyclic audio streams and omap_hsmmc.

Nothing needs to be saved at suspend time; everything the resume hook
programs back into the hardware can be reconstructed from runtime
information the driver already keeps.
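
For context, the mechanism is the standard dev_pm_ops late/early
system-sleep hooks with no suspend handler at all. A minimal sketch of
that pattern follows; the "foo" names are hypothetical, only
SET_LATE_SYSTEM_SLEEP_PM_OPS and the callback signature are real kernel
API:

#include <linux/device.h>
#include <linux/pm.h>

/* Sketch only: nothing is saved at suspend (NULL); the resume callback
 * runs in the resume_early phase and reprograms the controller purely
 * from bookkeeping the driver already holds in memory.
 */
static int foo_pm_resume(struct device *dev)
{
        /* rebuild hardware state from retained driver state */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, foo_pm_resume)
};

Because the callback lands in the resume_early phase, it runs before the
normal resume callbacks of client drivers, so users such as omap_hsmmc
see a fully restored eDMA by the time they resume.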

Since the resume path now calls functions that were previously used only
from __init context, their __init annotations had to be dropped.
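
(__init code is placed in .init.text, which the kernel frees once boot
completes, so it must not be reachable from a PM callback.) A rough
illustration of the pattern that would break, using hypothetical "foo"
names, not code from this patch:

#include <linux/device.h>
#include <linux/init.h>

static void __init foo_hw_setup(void)  /* in .init.text, freed after boot */
{
}

static int foo_pm_resume(struct device *dev)
{
        /* invalid: jumps into freed init memory; modpost reports a
         * section mismatch for this reference */
        foo_hw_setup();
        return 0;
}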

[nm@ti.com: added error handling for runtime + suspend_late/early_resume]
Signed-off-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Daniel Mack <zonque@gmail.com>
Tested-by: Joel Fernandes <joelf@ti.com>
Acked-by: Joel Fernandes <joelf@ti.com>
[nsekhar@ti.com: remove unneeded pm_runtime_get_sync() from resume]
Signed-off-by: Sekhar Nori <nsekhar@ti.com>
commit a2b1175131 (parent f114040e3e)
Author:    Daniel Mack, 2014-08-26 10:52:53 +02:00
Committer: Sekhar Nori

@@ -244,6 +244,8 @@ struct edma {
 	/* list of channels with no even trigger; terminated by "-1" */
 	const s8	*noevent;
 
+	struct edma_soc_info	*info;
+
 	/* The edma_inuse bit for each PaRAM slot is clear unless the
 	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
 	 */
@@ -295,7 +297,7 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
 			~(0x7 << bit), queue_no << bit);
 }
 
-static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
+static void assign_priority_to_queue(unsigned ctlr, int queue_no,
 		int priority)
 {
 	int bit = queue_no * 4;
@@ -314,7 +316,7 @@ static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
  * included in that particular EDMA variant (Eg : dm646x)
  *
  */
-static void __init map_dmach_param(unsigned ctlr)
+static void map_dmach_param(unsigned ctlr)
 {
 	int i;
 	for (i = 0; i < EDMA_MAX_DMACH; i++)
@@ -1792,15 +1794,61 @@ static int edma_probe(struct platform_device *pdev)
 			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
 			edma_write_array(j, EDMA_QRAE, i, 0x0);
 		}
+		edma_cc[j]->info = info[j];
 		arch_num_cc++;
 	}
 
 	return 0;
 }
 
+static int edma_pm_resume(struct device *dev)
+{
+	int i, j;
+
+	for (j = 0; j < arch_num_cc; j++) {
+		struct edma *cc = edma_cc[j];
+
+		s8 (*queue_priority_mapping)[2];
+
+		queue_priority_mapping = cc->info->queue_priority_mapping;
+
+		/* Event queue priority mapping */
+		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+			assign_priority_to_queue(j,
+						 queue_priority_mapping[i][0],
+						 queue_priority_mapping[i][1]);
+
+		/*
+		 * Map the channel to param entry if channel mapping logic
+		 * exist
+		 */
+		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
+			map_dmach_param(j);
+
+		for (i = 0; i < cc->num_channels; i++) {
+			if (test_bit(i, cc->edma_inuse)) {
+				/* ensure access through shadow region 0 */
+				edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
+					       BIT(i & 0x1f));
+
+				setup_dma_interrupt(i,
+						    cc->intr_data[i].callback,
+						    cc->intr_data[i].data);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops edma_pm_ops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
+};
+
 static struct platform_driver edma_driver = {
 	.driver = {
 		.name	= "edma",
+		.pm	= &edma_pm_ops,
 		.of_match_table = edma_of_ids,
 	},
 	.probe = edma_probe,