9ffc1d19fc
The "sub-section memory hotplug" facility allows memremap_pages() users like libnvdimm to compensate for hardware platforms like x86 that have a section size larger than their hardware memory mapping granularity. The compensation that sub-section support affords is being tolerant of physical memory resources shifting by units smaller (64MiB on x86) than the memory-hotplug section size (128 MiB). Where the platform physical-memory mapping granularity is limited by the number and capability of address-decode-registers in the memory controller. While the sub-section support allows memremap_pages() to operate on sub-section (2MiB) granularity, the Power architecture may still require 16MiB alignment on "!radix_enabled()" platforms. In order for libnvdimm to be able to detect and manage this per-arch limitation, introduce memremap_compat_align() as a common minimum alignment across all driver-facing memory-mapping interfaces, and let Power override it to 16MiB in the "!radix_enabled()" case. The assumption / requirement for 16MiB to be a viable memremap_compat_align() value is that Power does not have platforms where its equivalent of address-decode-registers never hardware remaps a persistent memory resource on smaller than 16MiB boundaries. Note that I tried my best to not add a new Kconfig symbol, but header include entanglements defeated the #ifndef memremap_compat_align design pattern and the need to export it defeats the __weak design pattern for arch overrides. Based on an initial patch by Aneesh. Link: http://lore.kernel.org/r/CAPcyv4gBGNP95APYaBcsocEa50tQj9b5h__83vgngjq3ouGX_Q@mail.gmail.com Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Reported-by: Jeff Moyer <jmoyer@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc) Signed-off-by: Dan Williams <dan.j.williams@intel.com>
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>

/* Current bottom of the ioremap space; early mappings are carved top-down. */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);

/* Establish a mapping one page at a time via map_kernel_page(). */
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))	/* Should clean up */
			return err;
	}

	return 0;
}

void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;
	unsigned long va;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	va = (unsigned long)area->addr;

	ret = ioremap_page_range(va, va + size, pa, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	/* Mapping failed: unwind any partial mapping and release the VM area. */
	unmap_kernel_range(va, size);
	free_vm_area(area);

	return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

	if (radix_enabled())
		return SUBSECTION_SIZE;
	return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif