2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-25 05:34:00 +08:00

ALSA: memalloc: Use proper SG helpers for noncontig allocations

The recently introduced non-contiguous page allocation support helpers
are using the simplified code to calculate the page and DMA address
based on the vmalloc helpers, but this isn't quite right as the vmap
is valid only for the direct DMA.

This patch corrects those accessors to use the proper SG helpers
instead.

Fixes: a25684a956 ("ALSA: memalloc: Support for non-contiguous page allocation")
Tested-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
Link: https://lore.kernel.org/r/20211108151059.31898-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
This commit is contained in:
Takashi Iwai 2021-11-08 16:10:59 +01:00
parent 43d35ccc36
commit ad4f93ca41

View File

@ -552,15 +552,73 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
}
}
/*
 * Position an SG page iterator at the page that contains @offset
 * within the buffer's SG table (stored in dmab->private_data).
 */
static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *table = dmab->private_data;

	__sg_page_iter_start(piter, table->sgl, table->orig_nents,
			     offset >> PAGE_SHIFT);
}
/*
 * Return the DMA address corresponding to @offset in the buffer,
 * resolved via the SG DMA page iterator (valid for non-direct DMA too).
 */
static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;
	size_t in_page = offset % PAGE_SIZE;	/* offset within the page */

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + in_page;
}
/*
 * Return the struct page that backs @offset in the buffer,
 * looked up through the SG page iterator.
 */
static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter pg_iter;

	snd_dma_noncontig_iter_set(dmab, &pg_iter, offset);
	__sg_page_iter_next(&pg_iter);
	return sg_page_iter_page(&pg_iter);
}
static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size)
{
struct sg_dma_page_iter iter;
unsigned int start, end;
unsigned long addr;
start = ALIGN_DOWN(ofs, PAGE_SIZE);
end = ofs + size - 1; /* the last byte address */
snd_dma_noncontig_iter_set(dmab, &iter.base, start);
if (!__sg_page_iter_dma_next(&iter))
return 0;
/* check page continuity */
addr = sg_page_iter_dma_address(&iter);
for (;;) {
start += PAGE_SIZE;
if (start > end)
break;
addr += PAGE_SIZE;
if (!__sg_page_iter_dma_next(&iter) ||
sg_page_iter_dma_address(&iter) != addr)
return start - ofs;
}
/* ok, all on continuous pages */
return size;
}
/*
 * Ops table for non-contiguous allocations.  The get_* accessors use the
 * proper SG helpers above instead of the vmalloc ones (the vmap-based
 * accessors are valid only for direct DMA); the stale duplicate vmalloc
 * initializers have been dropped — duplicate designated initializers are
 * a constraint violation and the earlier entries would silently lose.
 */
static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
/*