f9bff0e318
Patch series "New page table range API", v6.

This patchset changes the API used by the MM to set up page table entries. The four APIs are:

	set_ptes(mm, addr, ptep, pte, nr)
	update_mmu_cache_range(vma, addr, ptep, nr)
	flush_dcache_folio(folio)
	flush_icache_pages(vma, page, nr)

flush_dcache_folio() isn't technically new, but no architecture implemented it, so I've done that for them. The old APIs remain around but are mostly implemented by calling the new interfaces.

The new APIs are based around setting up N page table entries at once. The N entries belong to the same PMD, the same folio and the same VMA, so ptep++ is a legitimate operation, and locking is taken care of for you. Some architectures can do a better job of it than just a loop, but I have hesitated to make too deep a change to architectures I don't understand well.

One thing I have changed in every architecture is that PG_arch_1 is now a per-folio bit instead of a per-page bit when used for dcache clean/dirty tracking. This was something that would have to happen eventually, and it makes sense to do it now rather than iterate over every page involved in a cache flush and figure out if it needs to happen.

The point of all this is better performance, and Fengwei Yin has measured improvement on x86. I suspect you'll see improvement on your architecture too. Try the new will-it-scale test mentioned here:
https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/
You'll need to run it on an XFS filesystem and have CONFIG_TRANSPARENT_HUGEPAGE set.

This patchset is the basis for much of the anonymous large folio work being done by Ryan, so it's received quite a lot of testing over the last few months.

This patch (of 38):

Determine if a value lies within a range more efficiently (subtraction + comparison vs two comparisons and an AND). It also has useful (under some circumstances) behaviour if the range exceeds the maximum value of the type.

Convert all the conflicting definitions of in_range() within the kernel; some can use the generic definition while others need their own definition.

Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
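As a quick illustration of the range check this patch describes, here is a minimal stand-alone sketch of the subtraction + comparison idiom. The name in_range_sketch and the stand-alone form are illustrative only; the kernel's actual generic in_range() is defined by the patch itself.

#include <stdbool.h>

/* true iff val lies in [start, start + len) */
static inline bool in_range_sketch(unsigned long val,
				   unsigned long start,
				   unsigned long len)
{
	/*
	 * When val < start, the unsigned subtraction wraps around to a
	 * huge value, so the single comparison also rejects values
	 * below the range: one compare instead of two plus an AND.
	 * The same wrap-around is what gives the useful behaviour when
	 * start + len exceeds the maximum value of the type.
	 */
	return (val - start) < len;
}

For example, in_range_sketch(0x5, 0x10, 0x20) is false because 0x5 - 0x10 wraps to a huge unsigned value, while in_range_sketch(0x18, 0x10, 0x20) is true.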
318 lines
8.4 KiB
C
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 * Author: John Garry <john.garry@huawei.com>
 */

#define pr_fmt(fmt)	"LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);

/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, the error code in case of failure.
 * If the range already exists, -EEXIST will be returned, which should be
 * considered a success.
 *
 * Register a new IO range node in the IO range list.
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_end = 0;
	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
	int ret = 0;

	if (!new_range || !new_range->fwnode || !new_range->size ||
	    (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;

	mutex_lock(&io_range_mutex);
	list_for_each_entry(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there */
			ret = -EEXIST;
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/* for MMIO ranges we need to check for overlap */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				mmio_end = range->io_start + range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big check if 64K space can be reserved */
			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_end;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}
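
/*
 * Illustrative usage sketch only (the field values and the ops name
 * below are hypothetical): an indirect-PIO host driver fills in a
 * struct logic_pio_hwaddr and registers it, e.g.:
 *
 *	range->fwnode   = fwnode;
 *	range->flags    = LOGIC_PIO_INDIRECT;
 *	range->size     = PIO_INDIRECT_SIZE;
 *	range->hostdata = host;
 *	range->ops      = &my_host_logic_pio_ops;
 *	ret = logic_pio_register_range(range);
 *
 * On success, range->io_start holds the logical PIO base assigned to
 * this host.
 */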

/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range which has been already registered.
 *
 * Unregister a previously-registered IO range node.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
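	/*
	 * Readers walk io_range_list locklessly under RCU; wait for
	 * them all to finish before the caller may free @range.
	 */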
	synchronize_rcu();
}

/**
 * find_io_range_by_fwnode - find logical PIO range for given FW node
 * @fwnode: FW node handle associated with logical PIO range
 *
 * Returns pointer to node on success, NULL otherwise.
 *
 * Traverse the io_range_list to find the registered node for @fwnode.
 */
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == fwnode) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	return found_range;
}

/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (in_range(pio, range->io_start, range->size)) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	if (!found_range)
		pr_err("PIO entry token 0x%lx invalid\n", pio);

	return found_range;
}

/**
 * logic_pio_to_hwaddr - translate logical PIO to HW address
 * @pio: logical PIO value
 *
 * Returns HW address if valid, ~0 otherwise.
 *
 * Translate the input logical PIO to the corresponding hardware address.
 * The input PIO should be unique in the whole logical PIO space.
 */
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range(pio);
	if (range)
		return range->hw_start + pio - range->io_start;

	return (resource_size_t)~0;
}

/**
 * logic_pio_trans_hwaddr - translate HW address to logical PIO
 * @fwnode: FW node reference for the host
 * @addr: Host-relative HW address
 * @size: size to translate
 *
 * Returns Logical PIO value if successful, ~0UL otherwise
 */
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
				     resource_size_t addr, resource_size_t size)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range_by_fwnode(fwnode);
	if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
		pr_err("IO range not found or invalid\n");
		return ~0UL;
	}
	if (range->size < size) {
		pr_err("resource size %pa cannot fit in IO range size %pa\n",
		       &size, &range->size);
		return ~0UL;
	}
	return addr - range->hw_start + range->io_start;
}
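
/*
 * Translate a CPU-side (MMIO) hardware address to its logical PIO
 * token by walking the registered LOGIC_PIO_CPU_MMIO ranges under RCU;
 * returns ~0UL if @addr is not covered by any registered range.
 */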
unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
	struct logic_pio_hwaddr *range;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->flags != LOGIC_PIO_CPU_MMIO)
			continue;
		if (in_range(addr, range->hw_start, range->size)) {
			unsigned long cpuaddr;

			cpuaddr = addr - range->hw_start + range->io_start;

			rcu_read_unlock();
			return cpuaddr;
		}
	}
	rcu_read_unlock();

	pr_err("addr %pa not registered in io_range_list\n", &addr);

	return ~0UL;
}
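
/*
 * The logic_{in,out}{b,w,l} and string accessors below dispatch on the
 * logical PIO token: tokens below MMIO_UPPER_LIMIT are memory-mapped
 * and use the regular MMIO accessors, while tokens in
 * [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) belong to an indirect-PIO host
 * and are forwarded to that host's registered ops.
 */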
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
#define BUILD_LOGIC_IO(bwl, type)					\
type logic_in##bwl(unsigned long addr)					\
{									\
	type ret = (type)~0;						\
									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		ret = _in##bwl(addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			ret = entry->ops->in(entry->hostdata,		\
					addr, sizeof(type));		\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
	return ret;							\
}									\
									\
void logic_out##bwl(type value, unsigned long addr)			\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		_out##bwl(value, addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->out(entry->hostdata,		\
					addr, value, sizeof(type));	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}									\
									\
void logic_ins##bwl(unsigned long addr, void *buffer,			\
		    unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		reads##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->ins(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
									\
}									\
									\
void logic_outs##bwl(unsigned long addr, const void *buffer,		\
		     unsigned int count)				\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		writes##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->outs(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */