mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-28 15:13:55 +08:00
linux-next/arch/avr32/include/asm/cache.h
FUJITA Tomonori a6eb9fe105 dma-mapping: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN
Now each architecture has its own dma_get_cache_alignment() implementation.

dma_get_cache_alignment() returns the minimum DMA alignment.  Architectures
define it as ARCH_KMALLOC_MINALIGN (which is used to make sure that a
kmalloc'ed buffer is DMA-safe; the buffer doesn't share a cache line with
other data).  So we can unify the dma_get_cache_alignment() implementations.
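For illustration, a minimal sketch of what such a unified helper can look
like once every architecture with a DMA alignment restriction exports
ARCH_DMA_MINALIGN; the fallback value of 1 for architectures without a
restriction is an assumption here, not a quote of any particular tree:

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;	/* minimum DMA-safe alignment */
#else
	return 1;			/* no alignment restriction */
#endif
}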

This patch:

dma_get_cache_alignment() needs to know whether an architecture defines
ARCH_KMALLOC_MINALIGN or not (i.e. whether the architecture has a DMA
alignment restriction).  However, slab.h defines ARCH_KMALLOC_MINALIGN
itself if the architecture doesn't define it.

Let's rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN.
ARCH_KMALLOC_MINALIGN is used only in the internals of slab/slob/slub
(except for crypto).
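Roughly, the slab headers can then fall back as sketched below (a sketch
only; the __alignof__(unsigned long long) default is an assumption about the
pre-existing fallback, and no specific header file is being quoted):

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif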

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-08-11 08:59:21 -07:00


#ifndef __ASM_AVR32_CACHE_H
#define __ASM_AVR32_CACHE_H
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifndef __ASSEMBLER__
struct cache_info {
	unsigned int ways;
	unsigned int sets;
	unsigned int linesz;
};
#endif /* __ASSEMBLER__ */
/* Cache operation constants */
#define ICACHE_FLUSH 0x00
#define ICACHE_INVALIDATE 0x01
#define ICACHE_LOCK 0x02
#define ICACHE_UNLOCK 0x03
#define ICACHE_PREFETCH 0x04
#define DCACHE_FLUSH 0x08
#define DCACHE_LOCK 0x09
#define DCACHE_UNLOCK 0x0a
#define DCACHE_INVALIDATE 0x0b
#define DCACHE_CLEAN 0x0c
#define DCACHE_CLEAN_INVAL 0x0d
#endif /* __ASM_AVR32_CACHE_H */
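To illustrate the comment in the header above: because kmalloc() returns
memory aligned to at least ARCH_DMA_MINALIGN (32 bytes on AVR32), such a
buffer occupies whole cache lines and never shares one with unrelated data.
A driver-style sketch follows; alloc_dma_safe_buffer() is a hypothetical
helper, while kmalloc(), dma_map_single() and dma_mapping_error() are the
standard kernel APIs:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only. */
static void *alloc_dma_safe_buffer(struct device *dev, size_t len,
				   dma_addr_t *handle)
{
	/* Aligned to at least ARCH_DMA_MINALIGN, so DMA-safe. */
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}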
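The cache operation constants above are operands for the AVR32 "cache"
instruction. A sketch of how one of them might be used, in the style of the
architecture's cache maintenance helpers; the inline asm operand constraints
here are an assumption, not a copy of the kernel's code:

#include <asm/cache.h>

/* Sketch: invalidate the D-cache line containing vaddr. */
static inline void invalidate_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(DCACHE_INVALIDATE)
		     : "memory");
}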