RISC-V Fixes for 6.3-rc3

* A pair of fixes to the ASID allocator to avoid leaking stale mappings
   between tasks.
 * A fix to the vmalloc fault handler to tolerate huge pages.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmQUf1cTHHBhbG1lckBk
 YWJiZWx0LmNvbQAKCRAuExnzX7sYiafvD/4ixaHUMYFBBsw0Vo2kXaILmBYNZOmz
 KoAHykqlg4TRZ0xtOK/iLcSsiDXVVbI91iBeKjrwOiJ2+Sk4gDm01JMhOK6eJh4I
 boQAoRNgUBJLiKp7ZlybJ3R8yXw4VkKK0lJKNd9zOko+76Z8cQitsiwliWQwnpJw
 jtKpzYZ8Plxki+0jUt7/21FUF0sy1UspgFTQdV6XfBGtIqVuVNgRLK4emjrKxl7s
 fpkvQfD9ZPCuCNqg42o9VULK8fQfQSi5jt9POrGVKg7EaEHb7NfxttWxu/VkMBoI
 cTa9zNSM4DYfmubOTqPoE4MxxmY294vii2JnoimQPDWlT9gGRD5Puf/rmm420cUE
 yhsl4HdurDBRw3608pIfXWl9pTBo/doFImrQfY/IuGlR6Jy632NFFdPXa0vA/RoM
 JBpAVJrUGRRo6w5B+GM5XVpxQNiBtMtGSVYNG2185Gtszlw6CebG31Da39kBPr2O
 G/QFTVaZJnlHVqEJwOm/7TuYM/8u+uT6eiuYiRBcHImOIleUJPGYnDfG+dav3nln
 E4DXBref4ikAZX794rEQnB6Ayt3Hl1E5lZ9HA+sezMNwv2zhT9rYAgF+oM8/A6FV
 3JxcBmkNj3lqKzwNK85YOHE7us/5u+PY7HPrUngC7iORvh2wSh+AVfiu7mXdhrWD
 e6NwgE4EoZOgqw==
 =A1sl
 -----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - fixes to the ASID allocator to avoid leaking stale mappings between
   tasks

 - fix the vmalloc fault handler to tolerate huge pages

* tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: mm: Support huge page in vmalloc_fault()
  riscv: asid: Fixup stale TLB entry cause application crash
  Revert "riscv: mm: notify remote harts about mmu cache updates"
Commit cb80b960ce by Linus Torvalds, 2023-03-17 10:33:33 -07:00
5 changed files with 42 additions and 51 deletions

arch/riscv/include/asm/mmu.h

@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
-	/* A local tlb flush is needed before user execution can resume. */
-	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

arch/riscv/include/asm/tlbflush.h

@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)

arch/riscv/mm/context.c

@@ -196,16 +196,6 @@ switch_mm_fast:
 
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
 	local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
-	if (static_branch_unlikely(&use_asid_allocator))
-		set_mm_asid(mm, cpu);
-	else
-		set_mm_noasid(mm);
+	/*
+	 * The mm_cpumask indicates which harts' TLBs contain the virtual
+	 * address mapping of the mm. Compared to noasid, using asid
+	 * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush TLB for every switch_mm for
+	 * performance. So when using asid, keep all CPUs footmarks in
+	 * cpumask() until mm reset.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		set_mm_asid(next, cpu);
+	} else {
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		set_mm_noasid(next);
+	}
 }
 
 static int __init asids_init(void)
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
 	/* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpu = smp_processor_id();
 
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-
-	set_mm(next, cpu);
+	set_mm(prev, next, cpu);
 
 	flush_icache_deferred(next, cpu);
 }
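
As a rough illustration of why the context.c change above can drop the old tlb_stale_mask bookkeeping: once set_mm() keeps every hart that has ever run an ASID-tagged mm in mm_cpumask(), a later range flush only has to fence the harts recorded in that mask. The sketch below is illustrative only: the function name sketch_flush_range is made up, and the local-hart fast path and stride/page-granularity cases of the real __sbi_tlb_flush_range() are left out; the SBI calls and the ASID read mirror the arch/riscv/mm/tlbflush.c hunks further down.

/* Illustrative sketch, assuming the same includes as arch/riscv/mm/tlbflush.c. */
static void sketch_flush_range(struct mm_struct *mm, unsigned long start,
			       unsigned long size)
{
	const struct cpumask *cmask = mm_cpumask(mm);	/* every hart that ever ran mm */

	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id);

		/*
		 * One remote fence reaches all harts in cmask, so no extra
		 * "stale" mask is needed to find harts with old entries.
		 */
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		sbi_remote_sfence_vma(cmask, start, size);
	}
}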

arch/riscv/mm/fault.c

@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 		no_context(regs, addr);
 		return;
 	}
+	if (pud_leaf(*pud_k))
+		goto flush_tlb;
 
 	/*
 	 * Since the vmalloc area is global, it is unnecessary
@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 		no_context(regs, addr);
 		return;
 	}
+	if (pmd_leaf(*pmd_k))
+		goto flush_tlb;
 
 	/*
 	 * Make sure the actual PTE exists as well to
@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
 	 * ordering constraint, not a cache flush; it is
 	 * necessary even after writing invalid entries.
 	 */
+flush_tlb:
 	local_flush_tlb_page(addr);
 }
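
Taken together, the three vmalloc_fault() hunks above boil down to the walk order sketched here: stop and fence as soon as a leaf entry is found at the PUD or PMD level, since a huge-page vmalloc mapping has no PTE level underneath it. The sketch is illustrative only; the function name sketch_sync_vmalloc is made up, and the pgd/p4d synchronization and error handling of the real vmalloc_fault() are left out.

/* Illustrative sketch only, not the kernel's exact vmalloc_fault(). */
static void sketch_sync_vmalloc(unsigned long addr)
{
	pgd_t *pgd_k = pgd_offset_k(addr);	/* walk init_mm's page tables */
	p4d_t *p4d_k = p4d_offset(pgd_k, addr);
	pud_t *pud_k = pud_offset(p4d_k, addr);
	pmd_t *pmd_k;

	if (pud_leaf(*pud_k))			/* huge mapping at the PUD level */
		goto flush_tlb;

	pmd_k = pmd_offset(pud_k, addr);
	if (pmd_leaf(*pmd_k))			/* huge mapping at the PMD level */
		goto flush_tlb;

	/* otherwise a normal PTE is expected (the real code checks it exists) */

flush_tlb:
	/* SFENCE.VMA is an ordering constraint, needed even for valid entries */
	local_flush_tlb_page(addr);
}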

arch/riscv/mm/tlbflush.c

@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {