/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate whether the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */
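
/*
 * For reference, a minimal sketch of the alias helpers this file relies on;
 * the authoritative definitions live in <asm/page.h> and may differ in
 * detail. Two addresses alias in the D-cache when they share a cache
 * "color", i.e. when they agree in the address bits between the page size
 * and the D-cache way size:
 *
 *	#define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
 *	#define DCACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
 *
 * TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK) thus yields a temporary
 * kernel mapping with the same color as 'addr', so cache operations
 * through it hit the same cache lines as the aliased mapping.
 */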

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

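/*
 * Invalidate the kernel-side D-cache alias of @page when it does not share
 * a cache color with the user address @vaddr: the linear mapping is used
 * for lowmem pages, a TLBTEMP alias for highmem pages.
 */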
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}
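
/*
 * Return a kernel virtual address in the TLBTEMP window at 'base' that has
 * the same cache color as the user address @vaddr, and report the page's
 * physical address through @paddr.
 */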
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	*paddr = page_to_phys(page);
	return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}
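
/*
 * Clear a user page through a TLBTEMP kernel mapping that has the same
 * cache color as the user address. The page is marked PG_arch_1 (dirty, in
 * the aliasing sense described above) because it was written through the
 * kernel alias rather than through any user mapping.
 */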
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);
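
/*
 * Copy a user page via two TLBTEMP mappings colored like the user address;
 * the destination is marked PG_arch_1 for the same reason as in
 * clear_user_highpage() above.
 */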
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * Any time the kernel writes to a user page cache page, or it is about to
 * read from a page cache page, this routine is called.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {
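		/*
		 * temp is the page's byte offset within its mapping; it
		 * stands in for the cache color of the user-space virtual
		 * address(es) at which the page is mapped.
		 */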
		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */
void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */
void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

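/*
 * Called when a PTE has been installed or updated: drop the old TLB entry
 * and bring the page's caches into the state described in the note at the
 * top of this file.
 */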
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */
	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		preempt_disable();
		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);
		preempt_enable();

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}
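
/*
 * Read from a user page on behalf of access_process_vm(): flush the
 * aliased user mapping first so the kernel-side copy sees current data.
 */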
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif
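
/*
 * When DCACHE_WAY_SIZE does not exceed PAGE_SIZE, no D-cache aliasing is
 * possible, and the architecture's cacheflush header is expected to supply
 * simpler definitions of the helpers above (roughly a plain memcpy plus
 * explicit I$/D$ synchronization for executable mappings).
 */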