[ALSA] Remove unneeded ugly hack for i386 in memalloc.c
The hack for dma_alloc_coherent() is no longer needed on 2.6.26 since the
base code was improved.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Jaroslav Kysela <perex@perex.cz>
This commit is contained in:
parent f3e9d5d1fd
commit c17cf06bfc
@@ -79,68 +79,6 @@ struct snd_mem_list {
 #define snd_assert(expr, args...) /**/
 #endif
 
-/*
- * Hacks
- */
-
-#if defined(__i386__)
-/*
- * A hack to allocate large buffers via dma_alloc_coherent()
- *
- * since dma_alloc_coherent always tries GFP_DMA when the requested
- * pci memory region is below 32bit, it happens quite often that even
- * 2 order of pages cannot be allocated.
- *
- * so in the following, we allocate at first without dma_mask, so that
- * allocation will be done without GFP_DMA. if the area doesn't match
- * with the requested region, then realloate with the original dma_mask
- * again.
- *
- * Really, we want to move this type of thing into dma_alloc_coherent()
- * so dma_mask doesn't have to be messed with.
- */
-
-static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle,
-					 gfp_t flags)
-{
-	void *ret;
-	u64 dma_mask, coherent_dma_mask;
-
-	if (dev == NULL || !dev->dma_mask)
-		return dma_alloc_coherent(dev, size, dma_handle, flags);
-	dma_mask = *dev->dma_mask;
-	coherent_dma_mask = dev->coherent_dma_mask;
-	*dev->dma_mask = 0xffffffff;		/* do without masking */
-	dev->coherent_dma_mask = 0xffffffff;	/* do without masking */
-	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-	*dev->dma_mask = dma_mask;			/* restore */
-	dev->coherent_dma_mask = coherent_dma_mask;	/* restore */
-	if (ret) {
-		/* obtained address is out of range? */
-		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
-			/* reallocate with the proper mask */
-			dma_free_coherent(dev, size, ret, *dma_handle);
-			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-		}
-	} else {
-		/* wish to success now with the proper mask... */
-		if (dma_mask != 0xffffffffUL) {
-			/* allocation with GFP_ATOMIC to avoid the long stall */
-			flags &= ~GFP_KERNEL;
-			flags |= GFP_ATOMIC;
-			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
-		}
-	}
-	return ret;
-}
-
-/* redefine dma_alloc_coherent for some architectures */
-#undef dma_alloc_coherent
-#define dma_alloc_coherent snd_dma_hack_alloc_coherent
-
-#endif /* arch */
-
 /*
  *
  *  Generic memory allocators
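For context, a minimal sketch (not part of this commit) of what a caller can do
once the core dma_alloc_coherent() honours the device's coherent DMA mask by
itself: set the mask as usual and allocate directly, with no need for the
snd_dma_hack_alloc_coherent() wrapper removed above. The helper name
alloc_coherent_buffer, the 32-bit mask, and the GFP_KERNEL context are
assumptions for illustration; dma_set_coherent_mask() is the present-day
helper for setting dev->coherent_dma_mask.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical caller: the improved allocator respects the coherent mask,
 * so no manual mask juggling or GFP_DMA fallback is required here. */
static void *alloc_coherent_buffer(struct device *dev, size_t size,
				   dma_addr_t *addr)
{
	/* 32-bit limit assumed, as for the PCI devices the old hack targeted */
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return NULL;
	return dma_alloc_coherent(dev, size, addr, GFP_KERNEL);
}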