2018-09-05 14:25:10 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
|
2019-06-18 20:34:35 +08:00
|
|
|
/*
|
|
|
|
* One C-SKY MMU TLB entry contain two PFN/page entry, ie:
|
|
|
|
* 1VPN -> 2PFN
|
|
|
|
*/
|
|
|
|
#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
|
|
|
|
#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)
|
|
|
|
|
2018-09-05 14:25:10 +08:00
|
|
|
/*
 * Flush the entire TLB: drop every entry via the MMU's
 * invalidate-all operation, regardless of ASID.
 */
void flush_tlb_all(void)
{
	tlb_invalid_all();
}
|
|
|
|
|
|
|
|
/*
 * Flush all TLB entries belonging to the address space @mm.
 *
 * With hardware TLBI support, "tlbi.asids" invalidates every entry
 * tagged with mm's ASID; the surrounding sync_is()/"sync.i" match the
 * barrier pattern used by all TLBI sequences in this file. Without
 * TLBI the only available operation is to drop the whole TLB.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.asids %0	\n"
		"sync.i		\n"
		:
		: "r" (cpu_asid(mm))
		: "memory");
#else
	tlb_invalid_all();
#endif
}
|
|
|
|
|
2019-06-18 20:34:35 +08:00
|
|
|
/*
 * The MMU operation registers can only invalidate entries in the
 * jtlb; changing the ASID field is what invalidates the I-utlb &
 * D-utlb.
 */
#ifndef CONFIG_CPU_HAS_TLBI
/*
 * Restore ENTRYHI to @oldpid after a probe/invalidate loop.
 *
 * If oldpid == newpid, writing oldpid back would not change the ASID
 * field, so first write oldpid + 1 to force an ASID change (which
 * invalidates the micro-TLBs), then write the real value. Note that
 * only the first write is guarded by the "if"; the final
 * write_mmu_entryhi(oldpid) always executes — this is intentional.
 */
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif
|
|
|
|
|
2018-09-05 14:25:10 +08:00
|
|
|
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
|
2019-06-18 17:20:10 +08:00
|
|
|
unsigned long end)
|
2018-09-05 14:25:10 +08:00
|
|
|
{
|
2019-06-18 20:34:35 +08:00
|
|
|
unsigned long newpid = cpu_asid(vma->vm_mm);
|
|
|
|
|
|
|
|
start &= TLB_ENTRY_SIZE_MASK;
|
|
|
|
end += TLB_ENTRY_SIZE - 1;
|
|
|
|
end &= TLB_ENTRY_SIZE_MASK;
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_HAS_TLBI
|
2020-12-24 13:59:57 +08:00
|
|
|
sync_is();
|
2019-06-18 20:34:35 +08:00
|
|
|
while (start < end) {
|
2020-12-24 13:59:57 +08:00
|
|
|
asm volatile(
|
|
|
|
"tlbi.vas %0 \n"
|
|
|
|
:
|
|
|
|
: "r" (start | newpid)
|
|
|
|
: "memory");
|
|
|
|
|
2019-06-18 20:34:35 +08:00
|
|
|
start += 2*PAGE_SIZE;
|
|
|
|
}
|
2020-12-24 13:59:57 +08:00
|
|
|
asm volatile("sync.i\n");
|
2019-06-18 20:34:35 +08:00
|
|
|
#else
|
|
|
|
{
|
|
|
|
unsigned long flags, oldpid;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
oldpid = read_mmu_entryhi() & ASID_MASK;
|
|
|
|
while (start < end) {
|
|
|
|
int idx;
|
|
|
|
|
|
|
|
write_mmu_entryhi(start | newpid);
|
|
|
|
start += 2*PAGE_SIZE;
|
|
|
|
tlb_probe();
|
|
|
|
idx = read_mmu_index();
|
|
|
|
if (idx >= 0)
|
|
|
|
tlb_invalid_indexed();
|
|
|
|
}
|
|
|
|
restore_asid_inv_utlb(oldpid, newpid);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
2018-09-05 14:25:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|
|
|
{
|
2019-06-18 20:34:35 +08:00
|
|
|
start &= TLB_ENTRY_SIZE_MASK;
|
|
|
|
end += TLB_ENTRY_SIZE - 1;
|
|
|
|
end &= TLB_ENTRY_SIZE_MASK;
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_HAS_TLBI
|
2020-12-24 13:59:57 +08:00
|
|
|
sync_is();
|
2019-06-18 20:34:35 +08:00
|
|
|
while (start < end) {
|
2020-12-24 13:59:57 +08:00
|
|
|
asm volatile(
|
|
|
|
"tlbi.vaas %0 \n"
|
|
|
|
:
|
|
|
|
: "r" (start)
|
|
|
|
: "memory");
|
|
|
|
|
2019-06-18 20:34:35 +08:00
|
|
|
start += 2*PAGE_SIZE;
|
|
|
|
}
|
2020-12-24 13:59:57 +08:00
|
|
|
asm volatile("sync.i\n");
|
2019-06-18 20:34:35 +08:00
|
|
|
#else
|
|
|
|
{
|
|
|
|
unsigned long flags, oldpid;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
oldpid = read_mmu_entryhi() & ASID_MASK;
|
|
|
|
while (start < end) {
|
|
|
|
int idx;
|
|
|
|
|
|
|
|
write_mmu_entryhi(start | oldpid);
|
|
|
|
start += 2*PAGE_SIZE;
|
|
|
|
tlb_probe();
|
|
|
|
idx = read_mmu_index();
|
|
|
|
if (idx >= 0)
|
|
|
|
tlb_invalid_indexed();
|
|
|
|
}
|
|
|
|
restore_asid_inv_utlb(oldpid, oldpid);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
2018-09-05 14:25:10 +08:00
|
|
|
}
|
|
|
|
|
2019-06-18 17:20:10 +08:00
|
|
|
/*
 * Flush the single TLB entry mapping user address @addr in @vma's
 * address space (tagged with that mm's ASID).
 *
 * @addr is rounded down to TLB_ENTRY_SIZE alignment because one
 * C-SKY TLB entry maps two consecutive pages.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr | newpid)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	/* Probe the jtlb for addr under the mm's ASID. */
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	/* A negative index means no matching jtlb entry. */
	if (idx >= 0)
		tlb_invalid_indexed();

	/* Also forces an ASID change to invalidate the utlbs. */
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}
|
|
|
|
|
2019-06-18 17:20:10 +08:00
|
|
|
/*
 * Flush the single TLB entry mapping @addr, regardless of ASID
 * ("tlbi.vaas" = invalidate by VA across all ASIDs on the TLBI path;
 * the fallback probes with whatever ASID is currently in ENTRYHI).
 *
 * @addr is rounded down to TLB_ENTRY_SIZE alignment because one
 * C-SKY TLB entry maps two consecutive pages.
 */
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vaas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	/* Probe the jtlb for addr under the current ASID. */
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	/* A negative index means no matching jtlb entry. */
	if (idx >= 0)
		tlb_invalid_indexed();

	/* Also forces an ASID change to invalidate the utlbs. */
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);
|