diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3255878d6f30..fa5641868d65 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -61,7 +61,7 @@ extern void invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(unsigned long start, unsigned long end);
 extern void __inval_dcache_area(unsigned long start, unsigned long end);
 extern void __clean_dcache_area_poc(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
+extern void __clean_dcache_area_pop(unsigned long start, unsigned long end);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index c83bb5a4aad2..62ea989effe8 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
 	 * barrier to order the cache maintenance against the memcpy.
 	 */
 	memcpy(dst, src, cnt);
-	__clean_dcache_area_pop(dst, cnt);
+	__clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt);
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);
 
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
 	rc = raw_copy_from_user(to, from, n);
 
 	/* See above */
-	__clean_dcache_area_pop(to, n - rc);
+	__clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc);
 	return rc;
 }
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index e2e2740c55ce..b71fcf56516b 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -194,16 +194,15 @@ SYM_FUNC_END_PI(__clean_dcache_area_poc)
 SYM_FUNC_END(__dma_clean_area)
 
 /*
- *	__clean_dcache_area_pop(kaddr, size)
+ *	__clean_dcache_area_pop(start, end)
  *
- *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	Ensure that any D-cache lines for the interval [start, end)
  *	are cleaned to the PoP.
  *
- *	- kaddr   - kernel address
- *	- size    - size in question
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
  */
 SYM_FUNC_START_PI(__clean_dcache_area_pop)
-	add	x1, x0, x1
 alternative_if_not ARM64_HAS_DCPOP
 	b	__clean_dcache_area_poc
 alternative_else_nop_endif
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index be650b573b2a..b2c226d93ca5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -84,7 +84,7 @@ void arch_wb_cache_pmem(void *addr, size_t size)
 {
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
-	__clean_dcache_area_pop(addr, size);
+	__clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
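
For reference, a minimal sketch of how an arch-internal caller adapts to the new prototype. The function and variable names (example_clean_to_pop, buf, len) are hypothetical and introduced here only to illustrate the (addr, size) to [start, end) conversion; code outside arch/arm64 should keep going through memcpy_flushcache() or arch_wb_cache_pmem(), as in the hunks above, rather than calling __clean_dcache_area_pop() directly.

#include <asm/cacheflush.h>

/*
 * Hypothetical caller: under the new prototype the old (addr, size)
 * pair becomes a half-open [start, end) virtual address range.
 */
static void example_clean_to_pop(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + len;

	/* Previously: __clean_dcache_area_pop(buf, len); */
	__clean_dcache_area_pop(start, end);
}

Passing the end address rather than the size also matches the convention already used by __clean_dcache_area_poc() and the other range-based helpers in cacheflush.h, which is why the add that recomputed x1 from x0 + x1 can be dropped from the assembly.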