LoongArch fixes for v6.11-rc3
Merge tag 'loongarch-fixes-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Enable the general EFI poweroff method to make poweroff usable on
  hardware that lacks ACPI S5, use accessors to page table entries
  instead of direct dereference to avoid potential problems, and two
  trivial KVM cleanups"

* tag 'loongarch-fixes-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Remove undefined a6 argument comment for kvm_hypercall()
  LoongArch: KVM: Remove unnecessary definition of KVM_PRIVATE_MEM_SLOTS
  LoongArch: Use accessors to page table entries instead of direct dereference
  LoongArch: Enable general EFI poweroff method
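The page-table series in this pull converts plain dereferences of page table entries into single-copy-atomic accessor reads. Below is a minimal sketch of the conversion pattern; the accessor macros mirror the ones added in the diff further down, while the helper function and its name are hypothetical, not kernel code:

#include <linux/pgtable.h>

static bool example_pte_is_present(pte_t *ptep)
{
        /* Old style: plain dereference, which the compiler may re-read or tear. */
        /* pte_t pte = *ptep; */

        /* New style: a single READ_ONCE() load through the accessor. */
        pte_t pte = ptep_get(ptep);

        return pte_present(pte);
}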
Commit: cf6d429eb6
@@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                             unsigned long addr, pte_t *ptep)
 {
         pte_t clear;
-        pte_t pte = *ptep;
+        pte_t pte = ptep_get(ptep);

         pte_val(clear) = (unsigned long)invalid_pte_table;
         set_pte_at(mm, addr, ptep, clear);
@@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                              pte_t *ptep, pte_t pte,
                                              int dirty)
 {
-        int changed = !pte_same(*ptep, pte);
+        int changed = !pte_same(ptep_get(ptep), pte);

         if (changed) {
                 set_pte_at(vma->vm_mm, addr, ptep, pte);
@@ -53,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
         pte_t *pte = virt_to_kpte(addr);

-        if (WARN_ON(!pte) || pte_none(*pte))
+        if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
                 return false;

         if (protect)
-                set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+                set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
         else
-                set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+                set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));

         preempt_disable();
         local_flush_tlb_one(addr);
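For context, kfence_protect_page() is the arch hook the generic KFENCE core uses to toggle guard pages, so the accessor-based reads above run on every protect/unprotect of a KFENCE page. The wrappers below are a simplified sketch of that caller, not verbatim kernel code:

#include <linux/align.h>
#include <asm/kfence.h>

/* Simplified sketch of how the KFENCE core drives the arch hook. */
static bool kfence_protect(unsigned long addr)
{
        return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

static bool kfence_unprotect(unsigned long addr)
{
        return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}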
@@ -26,8 +26,6 @@

 #define KVM_MAX_VCPUS 256
 #define KVM_MAX_CPUCFG_REGS 21
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 0

 #define KVM_HALT_POLL_NS_DEFAULT 500000
 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
@@ -39,9 +39,9 @@ struct kvm_steal_time {
  * Hypercall interface for KVM hypervisor
  *
  * a0: function identifier
- * a1-a6: args
+ * a1-a5: args
  * Return value will be placed in a0.
- * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
+ * Up to 5 arguments are passed in a1, a2, a3, a4, a5.
  */
 static __always_inline long kvm_hypercall0(u64 fid)
 {
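A hypothetical usage sketch of the documented convention (function identifier in a0, up to five arguments in a1-a5, result returned in a0). EXAMPLE_HC_FID is a made-up identifier for illustration, not a real LoongArch KVM hypercall number:

#include <linux/kvm_para.h>
#include <linux/printk.h>

#define EXAMPLE_HC_FID 0x100UL	/* hypothetical function identifier */

static long example_issue_hypercall(void)
{
        /* fid goes in a0; the host's return value comes back in a0. */
        long ret = kvm_hypercall0(EXAMPLE_HC_FID);

        if (ret < 0)
                pr_warn("hypercall 0x%lx failed: %ld\n", EXAMPLE_HC_FID, ret);

        return ret;
}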
@@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define KFENCE_AREA_START (VMEMMAP_END + 1)
 #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)

+#define ptep_get(ptep) READ_ONCE(*(ptep))
+#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
+
 #define pte_ERROR(e) \
         pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
         return p4d_val(p4d) != (unsigned long)invalid_pud_table;
 }

-static inline void p4d_clear(p4d_t *p4dp)
-{
-        p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
-}
-
 static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
         return (pud_t *)p4d_val(p4d);
@@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)

 static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
 {
-        *p4d = p4dval;
+        WRITE_ONCE(*p4d, p4dval);
+}
+
+static inline void p4d_clear(p4d_t *p4dp)
+{
+        set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
 }

 #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
@@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
         return pud_val(pud) != (unsigned long)invalid_pmd_table;
 }

-static inline void pud_clear(pud_t *pudp)
-{
-        pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
-}
-
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
         return (pmd_t *)pud_val(pud);
 }

-#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+static inline void set_pud(pud_t *pud, pud_t pudval)
+{
+        WRITE_ONCE(*pud, pudval);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+        set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
+}

 #define pud_phys(pud) PHYSADDR(pud_val(pud))
 #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
         return pmd_val(pmd) != (unsigned long)invalid_pte_table;
 }

-static inline void pmd_clear(pmd_t *pmdp)
+static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
 {
-        pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
+        WRITE_ONCE(*pmd, pmdval);
 }

-#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+        set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
+}

 #define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))

@@ -314,7 +323,8 @@ extern void paging_init(void);

 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        WRITE_ONCE(*ptep, pteval);
+
         if (pte_val(pteval) & _PAGE_GLOBAL) {
                 pte_t *buddy = ptep_buddy(ptep);
                 /*
@@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
                 : [global] "r" (page_global));
 #else /* !CONFIG_SMP */
-                if (pte_none(*buddy))
-                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+                if (pte_none(ptep_get(buddy)))
+                        WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
 #endif /* CONFIG_SMP */
         }
 }
@@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         /* Preserve global status for the pair */
-        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+        if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
                 set_pte(ptep, __pte(_PAGE_GLOBAL));
         else
                 set_pte(ptep, __pte(0));
@@ -603,7 +613,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                             unsigned long address, pmd_t *pmdp)
 {
-        pmd_t old = *pmdp;
+        pmd_t old = pmdp_get(pmdp);

         pmd_clear(pmdp);

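The pgtable.h conversions above pair WRITE_ONCE() stores in set_p4d()/set_pud()/set_pmd()/set_pte() with READ_ONCE() loads in ptep_get()/pmdp_get(). Below is a minimal sketch of that publish/sample discipline, using hypothetical helper names rather than kernel code: the writer publishes an entry with a single store and a lockless reader samples it with a single load, so neither side can be torn or silently re-fetched by the compiler.

#include <linux/compiler.h>

static inline void example_publish_entry(unsigned long *slot, unsigned long val)
{
        WRITE_ONCE(*slot, val);		/* one atomic-sized store */
}

static inline unsigned long example_sample_entry(unsigned long *slot)
{
        return READ_ONCE(*slot);	/* one atomic-sized load */
}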
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
         set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }

+bool efi_poweroff_required(void)
+{
+        return efi_enabled(EFI_RUNTIME_SERVICES) &&
+               (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
+
 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;

 #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
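efi_poweroff_required() is the hook the generic EFI reboot code consults when deciding whether to register EFI ResetSystem() as the power-off method. The sketch below paraphrases that generic logic under the usual pm_power_off/efi_power_off wiring; it is illustrative only, not a copy of the actual initcall, and efi_power_off is local to the generic EFI reboot code.

#include <linux/efi.h>
#include <linux/pm.h>

/* Paraphrased sketch of the generic EFI side. */
static int __init example_efi_shutdown_init(void)
{
        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return -ENODEV;

        /* Architectures that lack ACPI S5 ask for the EFI power-off path. */
        if (efi_poweroff_required())
                pm_power_off = efi_power_off;	/* ResetSystem(EFI_RESET_SHUTDOWN) */

        return 0;
}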
@@ -714,19 +714,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
          * value) and then p*d_offset() walks into the target huge page instead
          * of the old page table (sees the new value).
          */
-        pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+        pgd = pgdp_get(pgd_offset(kvm->mm, hva));
         if (pgd_none(pgd))
                 goto out;

-        p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+        p4d = p4dp_get(p4d_offset(&pgd, hva));
         if (p4d_none(p4d) || !p4d_present(p4d))
                 goto out;

-        pud = READ_ONCE(*pud_offset(&p4d, hva));
+        pud = pudp_get(pud_offset(&p4d, hva));
         if (pud_none(pud) || !pud_present(pud))
                 goto out;

-        pmd = READ_ONCE(*pmd_offset(&pud, hva));
+        pmd = pmdp_get(pmd_offset(&pud, hva));
         if (pmd_none(pmd) || !pmd_present(pmd))
                 goto out;

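The hunk above snapshots each table entry into a local copy before descending, so a concurrent collapse or split cannot make the walker mix old and new tables. A compact, hypothetical sketch of the same snapshot-then-walk idea using the p*dp_get() accessors (the helper name and its _PAGE_HUGE check are illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>

static bool example_addr_has_huge_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t pgd = pgdp_get(pgd_offset(mm, addr));
        p4d_t p4d;
        pud_t pud;
        pmd_t pmd;

        if (pgd_none(pgd))
                return false;

        /* Each level is read once; the next level is derived from the copy. */
        p4d = p4dp_get(p4d_offset(&pgd, addr));
        if (p4d_none(p4d) || !p4d_present(p4d))
                return false;

        pud = pudp_get(pud_offset(&p4d, addr));
        if (pud_none(pud) || !pud_present(pud))
                return false;

        pmd = pmdp_get(pmd_offset(&pud, addr));
        if (pmd_none(pmd) || !pmd_present(pmd))
                return false;

        return !!(pmd_val(pmd) & _PAGE_HUGE);
}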
@@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
         pmd_t *pmd = NULL;

         pgd = pgd_offset(mm, addr);
-        if (pgd_present(*pgd)) {
+        if (pgd_present(pgdp_get(pgd))) {
                 p4d = p4d_offset(pgd, addr);
-                if (p4d_present(*p4d)) {
+                if (p4d_present(p4dp_get(p4d))) {
                         pud = pud_offset(p4d, addr);
-                        if (pud_present(*pud))
+                        if (pud_present(pudp_get(pud)))
                                 pmd = pmd_offset(pud, addr);
                 }
         }
@@ -141,7 +141,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                 unsigned long addr, unsigned long next)
 {
-        int huge = pmd_val(*pmd) & _PAGE_HUGE;
+        int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;

         if (huge)
                 vmemmap_verify((pte_t *)pmd, node, addr, next);
@@ -173,7 +173,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         pud_t *pud;
         pmd_t *pmd;

-        if (p4d_none(*p4d)) {
+        if (p4d_none(p4dp_get(p4d))) {
                 pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                 if (!pud)
                         panic("%s: Failed to allocate memory\n", __func__);
@@ -184,7 +184,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         }

         pud = pud_offset(p4d, addr);
-        if (pud_none(*pud)) {
+        if (pud_none(pudp_get(pud))) {
                 pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                 if (!pmd)
                         panic("%s: Failed to allocate memory\n", __func__);
@@ -195,7 +195,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         }

         pmd = pmd_offset(pud, addr);
-        if (!pmd_present(*pmd)) {
+        if (!pmd_present(pmdp_get(pmd))) {
                 pte_t *pte;

                 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
@@ -216,7 +216,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
         BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

         ptep = populate_kernel_pte(addr);
-        if (!pte_none(*ptep)) {
+        if (!pte_none(ptep_get(ptep))) {
                 pte_ERROR(*ptep);
                 return;
         }
@@ -105,7 +105,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)

 static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
 {
-        if (__pmd_none(early, READ_ONCE(*pmdp))) {
+        if (__pmd_none(early, pmdp_get(pmdp))) {
                 phys_addr_t pte_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -118,7 +118,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,

 static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
 {
-        if (__pud_none(early, READ_ONCE(*pudp))) {
+        if (__pud_none(early, pudp_get(pudp))) {
                 phys_addr_t pmd_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -131,7 +131,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,

 static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
 {
-        if (__p4d_none(early, READ_ONCE(*p4dp))) {
+        if (__p4d_none(early, p4dp_get(p4dp))) {
                 phys_addr_t pud_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -154,7 +154,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                               : kasan_alloc_zeroed_page(node);
                 next = addr + PAGE_SIZE;
                 set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-        } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+        } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
 }

 static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@@ -166,7 +166,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
         do {
                 next = pmd_addr_end(addr, end);
                 kasan_pte_populate(pmdp, addr, next, node, early);
-        } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+        } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
 }

 static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
@@ -128,7 +128,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot)
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                 pmd_t *pmdp, pmd_t pmd)
 {
-        *pmdp = pmd;
+        WRITE_ONCE(*pmdp, pmd);
         flush_tlb_all();
 }
