mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-23 20:53:53 +08:00
e2ae634014
We have a handful of new kernel features for 5.11: * Support for the contiguous memory allocator. * Support for IRQ Time Accounting * Support for stack tracing * Support for strict /dev/mem * Support for kernel section protection I'm being a bit conservative on the cutoff for this round due to the timing, so this is all the new development I'm going to take for this cycle (even if some of it probably normally would have been OK). There are, however, some fixes on the list that I will likely be sending along either later this week or early next week. There is one issue in here: one of my test configurations (PREEMPT{,_DEBUG}=y) fails to boot on QEMU 5.0.0 (from April) as of the .text.init alignment patch. With any luck we'll sort out the issue, but given how many bugs get fixed all over the place and how unrelated those features seem my guess is that we're just running into something that's been lurking for a while and has already been fixed in the newer QEMU (though I wouldn't be surprised if it's one of these implicit assumptions we have in the boot flow). If it was hardware I'd be strongly inclined to look more closely, but given that users can upgrade their simulators I'm less worried about it. There are two merge conflicts, both in build files. They're both a bit clunky: arch/riscv/Kconfig is out of order (I have a script that's supposed to keep them in order, I'll fix it) and lib/Makefile is out of order (though GENERIC_LIB here doesn't mean quite what it does above). 
-----BEGIN PGP SIGNATURE----- iQJHBAABCgAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAl/cHO4THHBhbG1lckBk YWJiZWx0LmNvbQAKCRAuExnzX7sYiTlmD/4uDyNHBM1XH/XD4fSEwTYJvGLqt/Jo vtrGR/fm0SlQFUKCcywSzxcVAeGn56CACbEIDYLuL4xXRJmbwEuaRrHVx2sEhS9p pNhy+wus/SgDz5EUAawMyR2AEWgzl77hY5T/+AAo4yv65SGGBfsIdz5noIVwGNqW r0g5cw2O99z0vwu1aSrK4isWHconG9MfQnnVyepPSh67pyWS4aUCr1K3vLiqD2dE XcgtwdcgzUIY5aEoJNrWo5qTrcaG8m6MRNCDAKJ6MKdDA2wdGIN868G0wQnoURRm Y+yW7w3P20kM0b87zH50jujTWg38NBKOfaXb0mAfawZMapL60veTVmvs2kNtFXCy F6JWRkgTiRnGY72FtRR0igWXT5M7fz0EiLFXLMItGcgj79TUget4l/3sRMN47S/O cA/WiwptJH3mh8IkL6z5ZxWEThdOrbFt8F1T+Gyq/ayblcPnJaLn/wrWoeOwviWR fvEC7smuF5SBTbWZK5tBOP21Nvhb7bfr49Sgr8Tvdjl15tz97qK+2tsLXwkBoQnJ wU45jcXfzr5wgiGBOQANRite5bLsJ0TuOrTgA5gsGpv+JSDGbpcJbm0833x00nX/ 3GsW5xr+vsLCvljgPAtKsyDNRlGQu908Gxrat2+s8u92bLr1bwn30uKL5h6i/n1w QgWATuPPGXZZdw== =GWIH -----END PGP SIGNATURE----- Merge tag 'riscv-for-linus-5.11-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux Pull RISC-V updates from Palmer Dabbelt: "We have a handful of new kernel features for 5.11: - Support for the contiguous memory allocator. - Support for IRQ Time Accounting - Support for stack tracing - Support for strict /dev/mem - Support for kernel section protection I'm being a bit conservative on the cutoff for this round due to the timing, so this is all the new development I'm going to take for this cycle (even if some of it probably normally would have been OK). There are, however, some fixes on the list that I will likely be sending along either later this week or early next week. There is one issue in here: one of my test configurations (PREEMPT{,_DEBUG}=y) fails to boot on QEMU 5.0.0 (from April) as of the .text.init alignment patch. 
With any luck we'll sort out the issue, but given how many bugs get fixed all over the place and how unrelated those features seem my guess is that we're just running into something that's been lurking for a while and has already been fixed in the newer QEMU (though I wouldn't be surprised if it's one of these implicit assumptions we have in the boot flow). If it was hardware I'd be strongly inclined to look more closely, but given that users can upgrade their simulators I'm less worried about it" * tag 'riscv-for-linus-5.11-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: arm64: Use the generic devmem_is_allowed() arm: Use the generic devmem_is_allowed() RISC-V: Use the new generic devmem_is_allowed() lib: Add a generic version of devmem_is_allowed() riscv: Fixed kernel test robot warning riscv: kernel: Drop unused clean rule riscv: provide memmove implementation RISC-V: Move dynamic relocation section under __init RISC-V: Protect all kernel sections including init early RISC-V: Align the .init.text section RISC-V: Initialize SBI early riscv: Enable ARCH_STACKWALK riscv: Make stack walk callback consistent with generic code riscv: Cleanup stacktrace riscv: Add HAVE_IRQ_TIME_ACCOUNTING riscv: Enable CMA support riscv: Ignore Image.* and loader.bin riscv: Clean up boot dir riscv: Fix compressed Image formats build RISC-V: Add kernel image sections to the resource tree
236 lines
5.1 KiB
C
236 lines
5.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2019 SiFive
|
|
*/
|
|
|
|
#include <linux/pagewalk.h>
|
|
#include <linux/pgtable.h>
|
|
#include <asm/tlbflush.h>
|
|
#include <asm/bitops.h>
|
|
#include <asm/set_memory.h>
|
|
|
|
/*
 * Protection-bit edit applied at every page-table level during a walk:
 * bits in set_mask are ORed into the entry, bits in clear_mask are removed.
 */
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
|
|
{
|
|
struct pageattr_masks *masks = walk->private;
|
|
unsigned long new_val = val;
|
|
|
|
new_val &= ~(pgprot_val(masks->clear_mask));
|
|
new_val |= (pgprot_val(masks->set_mask));
|
|
|
|
return new_val;
|
|
}
|
|
|
|
/* Rewrite a PGD entry's protection bits, but only if it is a leaf mapping. */
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (!pgd_leaf(val))
		return 0;

	set_pgd(pgd, __pgd(set_pageattr_masks(pgd_val(val), walk)));
	return 0;
}
/* Rewrite a P4D entry's protection bits, but only if it is a leaf mapping. */
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (!p4d_leaf(val))
		return 0;

	set_p4d(p4d, __p4d(set_pageattr_masks(p4d_val(val), walk)));
	return 0;
}
/* Rewrite a PUD entry's protection bits, but only if it is a leaf mapping. */
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (!pud_leaf(val))
		return 0;

	set_pud(pud, __pud(set_pageattr_masks(pud_val(val), walk)));
	return 0;
}
/* Rewrite a PMD entry's protection bits, but only if it is a leaf mapping. */
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (!pmd_leaf(val))
		return 0;

	set_pmd(pmd, __pmd(set_pageattr_masks(pmd_val(val), walk)));
	return 0;
}
/* Rewrite a PTE's protection bits; at this level every entry is a leaf. */
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	unsigned long new_val;

	new_val = set_pageattr_masks(pte_val(READ_ONCE(*pte)), walk);
	set_pte(pte, __pte(new_val));

	return 0;
}
/* Unmapped ranges carry no protection bits to change — deliberately a no-op. */
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	return 0;
}
/*
 * Walker callbacks shared by all attribute changers below: rewrite leaf
 * entries at every page-table level and skip holes.
 */
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};
/*
 * Walk the kernel page tables over [addr, addr + numpages * PAGE_SIZE),
 * clearing clear_mask and setting set_mask on every mapping entry, then
 * flush the TLB for the whole range.
 */
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	int ret;

	if (numpages == 0)
		return 0;

	/* Kernel mappings have no VMAs, so use the no-VMA walker on init_mm. */
	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_read_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}
int set_memory_rw_nx(unsigned long addr, int numpages)
|
|
{
|
|
return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
|
|
__pgprot(_PAGE_EXEC));
|
|
}
|
|
|
|
int set_memory_ro(unsigned long addr, int numpages)
|
|
{
|
|
return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
|
|
__pgprot(_PAGE_WRITE));
|
|
}
|
|
|
|
int set_memory_rw(unsigned long addr, int numpages)
|
|
{
|
|
return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
|
|
__pgprot(0));
|
|
}
|
|
|
|
int set_memory_x(unsigned long addr, int numpages)
|
|
{
|
|
return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
|
|
}
|
|
|
|
int set_memory_nx(unsigned long addr, int numpages)
|
|
{
|
|
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
|
|
}
|
|
|
|
int set_direct_map_invalid_noflush(struct page *page)
|
|
{
|
|
int ret;
|
|
unsigned long start = (unsigned long)page_address(page);
|
|
unsigned long end = start + PAGE_SIZE;
|
|
struct pageattr_masks masks = {
|
|
.set_mask = __pgprot(0),
|
|
.clear_mask = __pgprot(_PAGE_PRESENT)
|
|
};
|
|
|
|
mmap_read_lock(&init_mm);
|
|
ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
|
|
mmap_read_unlock(&init_mm);
|
|
|
|
return ret;
|
|
}
|
|
|
|
int set_direct_map_default_noflush(struct page *page)
|
|
{
|
|
int ret;
|
|
unsigned long start = (unsigned long)page_address(page);
|
|
unsigned long end = start + PAGE_SIZE;
|
|
struct pageattr_masks masks = {
|
|
.set_mask = PAGE_KERNEL,
|
|
.clear_mask = __pgprot(0)
|
|
};
|
|
|
|
mmap_read_lock(&init_mm);
|
|
ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
|
|
mmap_read_unlock(&init_mm);
|
|
|
|
return ret;
|
|
}
|
|
|
|
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * DEBUG_PAGEALLOC hook: map (enable) or unmap (disable) a run of pages
 * in the direct map by toggling _PAGE_PRESENT.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory(addr, numpages, __pgprot(_PAGE_PRESENT),
			     __pgprot(0));
	else
		__set_memory(addr, numpages, __pgprot(0),
			     __pgprot(_PAGE_PRESENT));
}
#endif
bool kernel_page_present(struct page *page)
|
|
{
|
|
unsigned long addr = (unsigned long)page_address(page);
|
|
pgd_t *pgd;
|
|
pud_t *pud;
|
|
p4d_t *p4d;
|
|
pmd_t *pmd;
|
|
pte_t *pte;
|
|
|
|
pgd = pgd_offset_k(addr);
|
|
if (!pgd_present(*pgd))
|
|
return false;
|
|
|
|
p4d = p4d_offset(pgd, addr);
|
|
if (!p4d_present(*p4d))
|
|
return false;
|
|
|
|
pud = pud_offset(p4d, addr);
|
|
if (!pud_present(*pud))
|
|
return false;
|
|
|
|
pmd = pmd_offset(pud, addr);
|
|
if (!pmd_present(*pmd))
|
|
return false;
|
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
return pte_present(*pte);
|
|
}
|