2019-06-04 16:11:33 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* linux/arch/arm/mm/init.c
|
|
|
|
*
|
2005-10-28 21:48:37 +08:00
|
|
|
* Copyright (C) 1995-2005 Russell King
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/swap.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/mman.h>
|
2017-02-09 01:51:30 +08:00
|
|
|
#include <linux/sched/signal.h>
|
2017-02-09 01:51:36 +08:00
|
|
|
#include <linux/sched/task.h>
|
2011-08-01 04:17:29 +08:00
|
|
|
#include <linux/export.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/nodemask.h>
|
|
|
|
#include <linux/initrd.h>
|
2011-04-29 04:27:20 +08:00
|
|
|
#include <linux/of_fdt.h>
|
2008-09-18 03:21:55 +08:00
|
|
|
#include <linux/highmem.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/gfp.h>
|
2010-07-09 23:27:52 +08:00
|
|
|
#include <linux/memblock.h>
|
2011-12-29 20:09:51 +08:00
|
|
|
#include <linux/dma-contiguous.h>
|
2012-06-24 19:46:26 +08:00
|
|
|
#include <linux/sizes.h>
|
2015-12-01 02:36:28 +08:00
|
|
|
#include <linux/stop_machine.h>
|
2019-07-23 17:33:12 +08:00
|
|
|
#include <linux/swiotlb.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-04-14 01:57:29 +08:00
|
|
|
#include <asm/cp15.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/mach-types.h>
|
2012-01-13 23:00:51 +08:00
|
|
|
#include <asm/memblock.h>
|
2017-01-30 00:31:32 +08:00
|
|
|
#include <asm/memory.h>
|
2011-04-29 04:27:21 +08:00
|
|
|
#include <asm/prom.h>
|
2008-12-01 19:53:07 +08:00
|
|
|
#include <asm/sections.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/setup.h>
|
2019-10-11 20:51:43 +08:00
|
|
|
#include <asm/set_memory.h>
|
2014-04-04 08:28:11 +08:00
|
|
|
#include <asm/system_info.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/tlb.h>
|
2010-02-08 04:45:47 +08:00
|
|
|
#include <asm/fixmap.h>
|
2017-12-12 08:43:57 +08:00
|
|
|
#include <asm/ptdump.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <asm/mach/arch.h>
|
|
|
|
#include <asm/mach/map.h>
|
|
|
|
|
2006-08-22 00:06:38 +08:00
|
|
|
#include "mm.h"
|
|
|
|
|
2014-04-14 01:57:29 +08:00
|
|
|
#ifdef CONFIG_CPU_CP15_MMU
/*
 * Clear the given bits in the cached CP15 control-register value and
 * return the updated value.
 */
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment &= ~mask;
	return cr_alignment;
}
#endif
|
|
|
|
|
2018-11-06 06:54:27 +08:00
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Legacy ATAG_INITRD handler: the address supplied by the bootloader is
 * virtual, so convert it to physical before recording it.  Deprecated in
 * favour of ATAG_INITRD2, which passes a physical address.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	/*
	 * Single-line string (was split across two lines, which checkpatch
	 * flags and which defeats grepping for the message).
	 */
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/* ATAG_INITRD2 handler: the address is already physical. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-10-28 02:49:33 +08:00
|
|
|
/*
 * Compute the PFN bounds of system memory: the first usable PFN, the end
 * of the directly-mapped (lowmem) region, and the end of all memory
 * (which may lie in highmem).
 */
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
|
|
|
|
|
2011-05-11 22:39:00 +08:00
|
|
|
#ifdef CONFIG_ZONE_DMA
|
2011-07-19 03:05:10 +08:00
|
|
|
|
2013-08-02 04:29:29 +08:00
|
|
|
phys_addr_t arm_dma_zone_size __read_mostly;
|
2011-07-19 03:05:10 +08:00
|
|
|
EXPORT_SYMBOL(arm_dma_zone_size);
|
|
|
|
|
2011-07-09 04:26:59 +08:00
|
|
|
/*
|
|
|
|
* The DMA mask corresponding to the maximum bus address allocatable
|
|
|
|
* using GFP_DMA. The default here places no restriction on DMA
|
|
|
|
* allocations. This must be the smallest DMA mask in the system,
|
|
|
|
* so a successful GFP_DMA allocation will always satisfy this.
|
|
|
|
*/
|
2012-06-06 18:05:01 +08:00
|
|
|
phys_addr_t arm_dma_limit;
|
2013-07-09 19:14:49 +08:00
|
|
|
unsigned long arm_dma_pfn_limit;
|
2011-05-11 22:39:00 +08:00
|
|
|
#endif
|
|
|
|
|
2013-07-26 21:55:59 +08:00
|
|
|
/*
 * Initialise the DMA zone limits from the machine descriptor.
 *
 * If the machine declares a DMA zone size, the DMA limit is the last
 * byte of that zone above PHYS_OFFSET; otherwise DMA is unrestricted
 * within the 32-bit bus address space.  arm_dma_pfn_limit is the same
 * limit expressed as a page frame number.
 */
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else {
		/* No machine-specific limit: allow the full 32-bit bus. */
		arm_dma_limit = 0xffffffff;
	}
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
|
|
|
|
|
2013-06-30 12:28:46 +08:00
|
|
|
/*
 * Populate the per-zone maximum-PFN array and hand it to the core MM via
 * free_area_init().
 *
 * min:      lowest usable PFN (unused in the body; NOTE the name shadows
 *           the min() macro, which still expands below because expansion
 *           is triggered by the function-like "min(" syntax)
 * max_low:  highest lowmem PFN (end of the directly-mapped region)
 * max_high: highest PFN in the system (end of highmem)
 */
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	/* ZONE_DMA ends at the DMA PFN limit, but never above lowmem. */
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}
|
|
|
|
|
ARM: 6913/1: sparsemem: allow pfn_valid to be overridden when using SPARSEMEM
In commit eb33575c ("[ARM] Double check memmap is actually valid with a
memmap has unexpected holes V2"), a new function, memmap_valid_within,
was introduced to mmzone.h so that holes in the memmap which pass
pfn_valid in SPARSEMEM configurations can be detected and avoided.
The fix to this problem checks that the pfn <-> page linkages are
correct by calculating the page for the pfn and then checking that
page_to_pfn on that page returns the original pfn. Unfortunately, in
SPARSEMEM configurations, this results in reading from the page flags to
determine the correct section. Since the memmap here has been freed,
junk is read from memory and the check is no longer robust.
In the best case, reading from /proc/pagetypeinfo will give you the
wrong answer. In the worst case, you get SEGVs, Kernel OOPses and hung
CPUs. Furthermore, ioremap implementations that use pfn_valid to
disallow the remapping of normal memory will break.
This patch allows architectures to provide their own pfn_valid function
instead of using the default implementation used by sparsemem. The
architecture-specific version is aware of the memmap state and will
return false when passed a pfn for a freed page within a valid section.
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-05-19 20:21:14 +08:00
|
|
|
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
/*
 * A PFN is valid when it round-trips through the pfn<->phys conversion
 * (i.e. it is representable as a physical address) and that address is
 * mapped memory known to memblock.
 */
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	return __phys_to_pfn(addr) == pfn && memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
|
2009-10-30 01:06:17 +08:00
|
|
|
|
2012-01-13 23:00:51 +08:00
|
|
|
static bool arm_memblock_steal_permitted = true;
|
|
|
|
|
2012-01-19 22:35:19 +08:00
|
|
|
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
|
2012-01-13 23:00:51 +08:00
|
|
|
{
|
|
|
|
phys_addr_t phys;
|
|
|
|
|
|
|
|
BUG_ON(!arm_memblock_steal_permitted);
|
|
|
|
|
2019-03-12 14:29:06 +08:00
|
|
|
phys = memblock_phys_alloc(size, align);
|
2019-03-12 14:29:26 +08:00
|
|
|
if (!phys)
|
|
|
|
panic("Failed to steal %pa bytes at %pS\n",
|
|
|
|
&size, (void *)_RET_IP_);
|
|
|
|
|
2012-01-13 23:00:51 +08:00
|
|
|
memblock_free(phys, size);
|
|
|
|
memblock_remove(phys, size);
|
|
|
|
|
|
|
|
return phys;
|
|
|
|
}
|
|
|
|
|
2017-01-16 23:11:10 +08:00
|
|
|
/*
 * Reserve the physical memory occupied by the initrd (if any) and set up
 * initrd_start/initrd_end as virtual addresses for the rest of the kernel.
 * On any inconsistency the initrd is disabled rather than risking memory
 * corruption.
 */
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t base;
	unsigned long len;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the region out to whole pages, mirroring free_initrd_mem():
	 * pages only partially covered by the initrd must still be reserved
	 * in full so they cannot be handed out for other purposes.
	 */
	base = round_down(phys_initrd_start, PAGE_SIZE);
	len = round_up(phys_initrd_size + (phys_initrd_start - base),
		       PAGE_SIZE);

	if (!memblock_is_region_memory(base, len)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)base, len);
		return;
	}

	if (memblock_is_region_reserved(base, len)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)base, len);
		return;
	}

	memblock_reserve(base, len);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
|
|
|
|
|
2019-05-28 16:38:14 +08:00
|
|
|
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
|
|
|
|
void check_cpu_icache_size(int cpuid)
|
|
|
|
{
|
|
|
|
u32 size, ctr;
|
|
|
|
|
|
|
|
asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
|
|
|
|
|
|
|
|
size = 1 << ((ctr & 0xf) + 2);
|
|
|
|
if (cpuid != 0 && icache_size != size)
|
|
|
|
pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
|
|
|
|
cpuid);
|
|
|
|
if (icache_size > size)
|
|
|
|
icache_size = size;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-01-16 23:11:10 +08:00
|
|
|
/*
 * Record all boot-time memory reservations with memblock: kernel image,
 * initrd, architecture mm reservations, platform regions, FDT regions and
 * the CMA area.  Afterwards arm_memblock_steal() is no longer permitted.
 * The call order below is significant.
 */
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	/* Reserve and virtually map the initrd, if one was passed in. */
	arm_initrd_init();

	/* Architecture mm reservations (implemented elsewhere; see "mm.h"). */
	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	/* Reserve the FDT blob itself, then regions it declares reserved. */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	/* All fixed reservations are in place; forbid further stealing. */
	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}
|
|
|
|
|
2010-05-23 02:47:18 +08:00
|
|
|
/*
 * Finish early memory initialisation: establish the system PFN limits,
 * optionally run the early memory test, bring up sparsemem, and hand the
 * zone sizes to the core MM.  The call order below is significant.
 */
void __init bootmem_init(void)
{
	/* Fixed reservations are done; memblock may now resize its arrays. */
	memblock_allow_resize();

	/* Fill in the global min/max lowmem/max PFN limits. */
	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	/* Optional early memory test, restricted to the lowmem range. */
	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-08 01:43:36 +08:00
|
|
|
/*
|
|
|
|
* Poison init memory with an undefined instruction (ARM) or a branch to an
|
|
|
|
* undefined instruction (Thumb).
|
|
|
|
*/
|
|
|
|
static inline void poison_init_mem(void *s, size_t count)
|
|
|
|
{
|
|
|
|
u32 *p = (u32 *)s;
|
2011-08-04 16:39:31 +08:00
|
|
|
for (; count != 0; count -= 4)
|
2011-07-08 01:43:36 +08:00
|
|
|
*p++ = 0xe7fddef0;
|
|
|
|
}
|
|
|
|
|
2019-12-18 08:18:49 +08:00
|
|
|
static inline void __init
|
2010-05-08 00:40:33 +08:00
|
|
|
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
|
2005-06-27 21:16:47 +08:00
|
|
|
{
|
|
|
|
struct page *start_pg, *end_pg;
|
2012-06-21 20:09:05 +08:00
|
|
|
phys_addr_t pg, pgend;
|
2005-06-27 21:16:47 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert start_pfn/end_pfn to a struct page pointer.
|
|
|
|
*/
|
2009-10-07 00:57:22 +08:00
|
|
|
start_pg = pfn_to_page(start_pfn - 1) + 1;
|
2011-04-29 01:44:31 +08:00
|
|
|
end_pg = pfn_to_page(end_pfn - 1) + 1;
|
2005-06-27 21:16:47 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert to physical addresses, and
|
|
|
|
* round start upwards and end downwards.
|
|
|
|
*/
|
2012-06-21 20:09:05 +08:00
|
|
|
pg = PAGE_ALIGN(__pa(start_pg));
|
|
|
|
pgend = __pa(end_pg) & PAGE_MASK;
|
2005-06-27 21:16:47 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If there are free pages between these,
|
|
|
|
* free the section of the memmap array.
|
|
|
|
*/
|
|
|
|
if (pg < pgend)
|
2014-01-22 07:50:49 +08:00
|
|
|
memblock_free_early(pg, pgend - pg);
|
2005-06-27 21:16:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The mem_map array can get very big. Free the unused area of the memory map.
|
|
|
|
*/
|
2014-04-14 05:54:58 +08:00
|
|
|
static void __init free_unused_memmap(void)
{
	unsigned long bank_start, prev_bank_end = 0;
	struct memblock_region *region;

	/*
	 * Walk the memory banks in address order (memblock keeps them
	 * sorted; see bootmem_init()) and release the memmap covering the
	 * holes between consecutive banks.
	 */
	for_each_memblock(memory, region) {
		bank_start = memblock_region_memory_base_pfn(region);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Never free past the end of the previous SPARSEMEM
		 * section: memmap entries for absent sections don't exist.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * The VM subsystem requires valid memmap entries from the
		 * bank start rounded down to MAX_ORDER_NR_PAGES, so those
		 * must not be freed either.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/* Free the memmap covering the gap after the previous bank. */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Likewise the VM subsystem requires valid memmap entries
		 * up to the bank end rounded up to MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(memblock_region_memory_end_pfn(region),
				      MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	/* Trim the unused tail of the final, partially-filled section. */
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
|
|
|
|
|
2013-04-30 06:06:26 +08:00
|
|
|
#ifdef CONFIG_HIGHMEM
/* Hand every highmem page in [pfn, end) back to the page allocator. */
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	while (pfn < end)
		free_highmem_page(pfn_to_page(pfn++));
}
#endif
|
|
|
|
|
2010-10-28 02:37:06 +08:00
|
|
|
/*
 * Release all highmem pages that are not covered by a memblock reserved
 * region to the page allocator.  Lowmem pages are handled separately by
 * memblock_free_all() in mem_init().
 */
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;	/* first pfn above lowmem */
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* NOMAP regions have no struct page backing; skip them. */
		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/*
		 * Find and exclude any reserved regions, freeing the gaps
		 * between them.  NOTE(review): this walk assumes memblock
		 * keeps reserved regions sorted by ascending base address,
		 * since 'start' only ever advances to res_end.
		 */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			/* reserved region entirely below current window */
			if (res_end < start)
				continue;
			/* clamp the reserved span to [start, end) */
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			/* free the unreserved gap preceding this region */
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	/* NOTE(review): presumably needed because LPAE systems can have
	 * memory beyond the 32-bit DMA limit — confirm. */
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	/* must run before memblock_free_all() hands memory to the buddy
	 * allocator, since it frees the memmap for holes between banks */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}
|
|
|
|
|
2017-02-07 08:31:58 +08:00
|
|
|
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * One virtual address range whose section-mapping permissions are
 * rewritten at boot.  section_update() applies (pmd & mask) | prot;
 * set_section_perms(..., set=false) uses @clear in place of @prot to
 * revert.  @start and @end must be SECTION_SIZE aligned or the entry is
 * rejected with an error naming @name.
 */
struct section_perm {
	const char *name;	/* label used in alignment error messages */
	unsigned long start;	/* first VA of the range (section aligned) */
	unsigned long end;	/* end VA, exclusive (section aligned) */
	pmdval_t mask;		/* bits preserved from the existing pmd */
	pmdval_t prot;		/* bits OR'd in when applying permissions */
	pmdval_t clear;		/* bits OR'd in when reverting permissions */
};
|
|
|
|
|
2016-01-26 08:20:21 +08:00
|
|
|
/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

/*
 * Ranges to be marked non-executable: each entry preserves everything
 * except PMD_SECT_XN and then sets PMD_SECT_XN.
 */
static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};
|
|
|
|
|
2014-04-04 04:29:50 +08:00
|
|
|
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		/*
		 * LPAE: no .clear — reverting relies solely on the mask
		 * stripping the read-only bits (clear defaults to 0).
		 */
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};
|
|
|
|
|
2014-04-04 08:28:11 +08:00
|
|
|
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	/*
	 * Without LPAE a pmd entry is a pair; the SECTION_SIZE bit of the
	 * address selects which half describes this section.
	 */
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	/* write back the descriptor and drop any stale TLB entries */
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
|
|
|
|
|
|
|
|
/* Make sure extended page tables are in use. */
|
|
|
|
static inline bool arch_has_strict_perms(void)
|
|
|
|
{
|
|
|
|
if (cpu_architecture() < CPU_ARCH_ARMv6)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return !!(get_cr() & CR_XP);
|
|
|
|
}
|
|
|
|
|
2019-10-11 20:51:52 +08:00
|
|
|
/*
 * Apply (@set == true, use .prot) or revert (@set == false, use .clear)
 * the section permissions in @perms[0..n) on @mm.  Entries whose start
 * or end is not SECTION_SIZE aligned are reported and skipped.  No-op on
 * CPUs without extended page tables.
 */
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		/* walk the range one section at a time */
		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}

}
|
|
|
|
|
2017-04-26 04:20:52 +08:00
|
|
|
/**
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	/*
	 * Sections are copied into each mm, so every user process's mm
	 * must be updated individually; kernel threads borrow mms and
	 * are skipped.
	 */
	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	/* also cover the mm this CPU is currently running on, and init_mm */
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}
|
|
|
|
|
2017-04-26 04:20:52 +08:00
|
|
|
static int __fix_kernmem_perms(void *unused)
|
2015-12-01 02:36:28 +08:00
|
|
|
{
|
|
|
|
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-26 04:20:52 +08:00
|
|
|
/*
 * Mark kernel memory regions non-executable under stop_machine() so no
 * other CPU executes while the section mappings are being rewritten.
 */
static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}
|
2014-04-04 04:29:50 +08:00
|
|
|
|
2017-04-26 04:20:52 +08:00
|
|
|
static int __mark_rodata_ro(void *unused)
|
2015-12-01 02:36:28 +08:00
|
|
|
{
|
|
|
|
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-07-10 15:22:40 +08:00
|
|
|
/* Set once mark_rodata_ro() has run; gates set_kernel_text_rw()/_ro(). */
static int kernel_set_to_readonly __read_mostly;
|
|
|
|
|
2014-04-04 04:29:50 +08:00
|
|
|
void mark_rodata_ro(void)
{
	/* flag first, so set_kernel_text_rw()/_ro() become operational */
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	/* report any mapping left both writable and executable */
	debug_checkwx();
}
|
|
|
|
|
|
|
|
void set_kernel_text_rw(void)
|
|
|
|
{
|
2018-07-10 15:22:40 +08:00
|
|
|
if (!kernel_set_to_readonly)
|
|
|
|
return;
|
|
|
|
|
2015-12-01 02:36:28 +08:00
|
|
|
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
|
|
|
|
current->active_mm);
|
2014-04-04 04:29:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void set_kernel_text_ro(void)
|
|
|
|
{
|
2018-07-10 15:22:40 +08:00
|
|
|
if (!kernel_set_to_readonly)
|
|
|
|
return;
|
|
|
|
|
2015-12-01 02:36:28 +08:00
|
|
|
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
|
|
|
|
current->active_mm);
|
2014-04-04 04:29:50 +08:00
|
|
|
}
|
|
|
|
|
2014-04-04 08:28:11 +08:00
|
|
|
#else
/* Without CONFIG_STRICT_KERNEL_RWX there are no permissions to adjust. */
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */
|
2014-04-04 08:28:11 +08:00
|
|
|
|
|
|
|
void free_initmem(void)
{
	/* lock down permissions before the init region is recycled */
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	/*
	 * NOTE(review): Integrator/CIntegrator machines keep their init
	 * memory instead of freeing it — presumably a platform quirk;
	 * confirm against those board files.
	 */
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
|
|
void free_initrd_mem(unsigned long start, unsigned long end)
|
|
|
|
{
|
2019-05-14 08:18:30 +08:00
|
|
|
if (start == initrd_start)
|
|
|
|
start = round_down(start, PAGE_SIZE);
|
|
|
|
if (end == initrd_end)
|
|
|
|
end = round_up(end, PAGE_SIZE);
|
2014-09-26 10:07:09 +08:00
|
|
|
|
2019-05-14 08:18:30 +08:00
|
|
|
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
|
|
|
|
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
#endif
|