ASoC: kirkwood: Use managed DMA buffer allocation

Instead of manually managing its DMA buffers using
dma_{alloc,free}_coherent(), let the sound core take care of this using
managed buffers.

On one hand this reduces the amount of boilerplate code, but the main
motivation for the change is to use shared code where possible. This
makes it easier to reason about correctness and to verify that the code
does not contain subtle bugs such as data leakage.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Link: https://lore.kernel.org/r/20210106133650.13509-3-lars@metafoo.de
Signed-off-by: Mark Brown <broonie@kernel.org>
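
The whole conversion hinges on one PCM-core facility,
snd_pcm_set_managed_buffer_all(). A minimal sketch of the pattern (the
example_pcm_construct name and the fixed 64 KiB size are illustrative
assumptions, not taken from this driver):

	#include <sound/core.h>
	#include <sound/pcm.h>
	#include <sound/soc.h>

	static int example_pcm_construct(struct snd_soc_component *component,
					 struct snd_soc_pcm_runtime *rtd)
	{
		/*
		 * The PCM core preallocates a coherent DMA buffer for every
		 * substream and frees it again when the PCM is destroyed, so
		 * the driver needs neither its own preallocation helper nor a
		 * pcm_destruct callback. Passing the same value for size and
		 * max pins the buffer to a fixed size.
		 */
		snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
					       rtd->card->snd_card->dev,
					       64 * 1024, 64 * 1024);
		return 0;
	}

The function is wired up as .pcm_construct in the
snd_soc_component_driver, just as kirkwood_dma_new is in the diff below.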

diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c

@@ -182,25 +182,6 @@ static int kirkwood_dma_close(struct snd_soc_component *component,
 	return 0;
 }
 
-static int kirkwood_dma_hw_params(struct snd_soc_component *component,
-				  struct snd_pcm_substream *substream,
-				  struct snd_pcm_hw_params *params)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-
-	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-	runtime->dma_bytes = params_buffer_bytes(params);
-
-	return 0;
-}
-
-static int kirkwood_dma_hw_free(struct snd_soc_component *component,
-				struct snd_pcm_substream *substream)
-{
-	snd_pcm_set_runtime_buffer(substream, NULL);
-	return 0;
-}
-
 static int kirkwood_dma_prepare(struct snd_soc_component *component,
 				struct snd_pcm_substream *substream)
 {
@@ -244,82 +225,28 @@ static snd_pcm_uframes_t kirkwood_dma_pointer(
 	return count;
 }
 
-static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
-					       int stream)
-{
-	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
-	struct snd_dma_buffer *buf = &substream->dma_buffer;
-	size_t size = kirkwood_dma_snd_hw.buffer_bytes_max;
-
-	buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	buf->dev.dev = pcm->card->dev;
-	buf->area = dma_alloc_coherent(pcm->card->dev, size,
-				       &buf->addr, GFP_KERNEL);
-	if (!buf->area)
-		return -ENOMEM;
-	buf->bytes = size;
-	buf->private_data = NULL;
-
-	return 0;
-}
-
 static int kirkwood_dma_new(struct snd_soc_component *component,
 			    struct snd_soc_pcm_runtime *rtd)
 {
+	size_t size = kirkwood_dma_snd_hw.buffer_bytes_max;
 	struct snd_card *card = rtd->card->snd_card;
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
 	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
 
-	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
-		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
-			SNDRV_PCM_STREAM_PLAYBACK);
-		if (ret)
-			return ret;
-	}
-
-	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
-		ret = kirkwood_dma_preallocate_dma_buffer(pcm,
-			SNDRV_PCM_STREAM_CAPTURE);
-		if (ret)
-			return ret;
-	}
-
+	snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
+				       card->dev, size, size);
 	return 0;
 }
 
-static void kirkwood_dma_free_dma_buffers(struct snd_soc_component *component,
-	struct snd_pcm *pcm)
-{
-	struct snd_pcm_substream *substream;
-	struct snd_dma_buffer *buf;
-	int stream;
-
-	for (stream = 0; stream < 2; stream++) {
-		substream = pcm->streams[stream].substream;
-		if (!substream)
-			continue;
-
-		buf = &substream->dma_buffer;
-		if (!buf->area)
-			continue;
-		dma_free_coherent(pcm->card->dev, buf->bytes,
-				  buf->area, buf->addr);
-		buf->area = NULL;
-	}
-}
-
 const struct snd_soc_component_driver kirkwood_soc_component = {
 	.name		= DRV_NAME,
 	.open		= kirkwood_dma_open,
 	.close		= kirkwood_dma_close,
-	.hw_params	= kirkwood_dma_hw_params,
-	.hw_free	= kirkwood_dma_hw_free,
 	.prepare	= kirkwood_dma_prepare,
 	.pointer	= kirkwood_dma_pointer,
 	.pcm_construct	= kirkwood_dma_new,
-	.pcm_destruct	= kirkwood_dma_free_dma_buffers,
 };
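
Since the core attaches the managed buffer before invoking the
component's hw_params callback, a driver that keeps hw_params around
for unrelated setup no longer has to touch the buffer at all. A rough
sketch (example_hw_params is hypothetical, not part of this commit):

	static int example_hw_params(struct snd_soc_component *component,
				     struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		/*
		 * With managed buffers, runtime->dma_area, dma_addr and
		 * dma_bytes are already set up by the core at this point,
		 * so no snd_pcm_set_runtime_buffer() call is needed; the
		 * core likewise detaches the buffer after hw_free.
		 */
		return 0;
	}

In this driver no other per-hw_params setup is needed, which is why the
commit can drop the hw_params and hw_free callbacks entirely instead of
stubbing them out.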