linux/arch/riscv/include/asm/cache.h
Jisheng Zhang dcb2743d1e
riscv: mm: still create swiotlb buffer for kmalloc() bouncing if required
After commit f51f7a0fc2 ("riscv: enable DMA_BOUNCE_UNALIGNED_KMALLOC
for !dma_coherent"), non-coherent platforms with less than 4GB of
memory rely on users passing the "swiotlb=mmnn,force" kernel parameter
to enable DMA bouncing for unaligned kmalloc() buffers. Now let's go
further: if no bouncing is needed for ZONE_DMA, let the kernel
automatically allocate a 1MB swiotlb buffer per 1GB of RAM for
kmalloc() bouncing on non-coherent platforms, so that there is no need
to pass "swiotlb=mmnn,force" any more.

The ratio of "1MB of swiotlb buffer per 1GB of RAM for kmalloc()
bouncing" is taken from arm64. Users can still force a smaller swiotlb
buffer by passing "swiotlb=mmnn".
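
For reference, the resulting sizing logic reads roughly as below. This
is a sketch modeled on the arm64 code the ratio is taken from, with a
dma_cache_alignment != 1 guard standing in for "the platform is
actually non-coherent"; treat it as illustrative rather than the exact
RISC-V hunk, which lands in mem_init() in arch/riscv/mm/init.c:

  bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);

  if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb &&
      dma_cache_alignment != 1) {
          /*
           * No bouncing needed for ZONE_DMA: reserve 1MB of swiotlb
           * per 1GB of RAM (total RAM / 1024), and never exceed the
           * default (or the user-requested "swiotlb=mmnn") size.
           */
          unsigned long size = DIV_ROUND_UP(memblock_phys_mem_size(), 1024);

          swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
          swiotlb = true;
  }

  swiotlb_init(swiotlb, SWIOTLB_VERBOSE);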

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240325110036.1564-1-jszhang@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2024-04-30 10:35:45 -07:00

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_CACHE_H
#define _ASM_RISCV_CACHE_H

#define L1_CACHE_SHIFT		6
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#ifdef CONFIG_RISCV_DMA_NONCOHERENT
/*
 * Non-coherent DMA needs full cache-line alignment, but kmalloc() may
 * still return 8-byte-aligned buffers: DMA to sub-cache-line
 * allocations is bounced through swiotlb instead.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#define ARCH_KMALLOC_MINALIGN	(8)
#endif

/*
 * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
 * the flat loader aligns it accordingly.
 */
#ifndef CONFIG_MMU
#define ARCH_SLAB_MINALIGN	16
#endif

#ifndef __ASSEMBLY__

/* Probed at boot; 1 on coherent platforms (arch/riscv/mm/dma-noncoherent.c) */
extern int dma_cache_alignment;

#ifdef CONFIG_RISCV_DMA_NONCOHERENT
#define dma_get_cache_alignment	dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
	return dma_cache_alignment;
}
#endif

#endif	/* __ASSEMBLY__ */
#endif	/* _ASM_RISCV_CACHE_H */
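
As a usage note, a driver that wants to skip the kmalloc() bounce path
entirely can pad its buffer length to dma_get_cache_alignment(), so the
allocation comes from a cache whose objects own whole cache lines. A
hypothetical sketch (my_alloc_dma_buf() and MY_MSG_LEN are illustrative
names, not from the tree):

  #include <linux/dma-mapping.h>
  #include <linux/slab.h>

  #define MY_MSG_LEN 24 /* payload smaller than one cache line */

  static void *my_alloc_dma_buf(size_t *lenp)
  {
          /* Round up to the runtime DMA alignment so the buffer
           * never shares a cache line with unrelated data. */
          size_t len = ALIGN(MY_MSG_LEN, dma_get_cache_alignment());

          *lenp = len;
          return kmalloc(len, GFP_KERNEL);
  }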