c993e07be0

Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - convert arm32 to the common dma-direct code (Arnd Bergmann, Robin Murphy, Christoph Hellwig)
 - restructure the PCIe peer to peer mapping support (Logan Gunthorpe)
 - allow the IOMMU code to communicate an optional DMA mapping length and use that in scsi and libata (John Garry)
 - split the global swiotlb lock (Tianyu Lan)
 - various fixes and cleanup (Chao Gao, Dan Carpenter, Dongli Zhang, Lukas Bulwahn, Robin Murphy)

* tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping: (45 commits)
  swiotlb: fix passing local variable to debugfs_create_ulong()
  dma-mapping: reformat comment to suppress htmldoc warning
  PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg()
  RDMA/rw: drop pci_p2pdma_[un]map_sg()
  RDMA/core: introduce ib_dma_pci_p2p_dma_supported()
  nvme-pci: convert to using dma_map_sgtable()
  nvme-pci: check DMA ops when indicating support for PCI P2PDMA
  iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg
  iommu: Explicitly skip bus address marked segments in __iommu_map_sg()
  dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support
  dma-direct: support PCI P2PDMA pages in dma-direct map_sg
  dma-mapping: allow EREMOTEIO return code for P2PDMA transfers
  PCI/P2PDMA: Introduce helpers for dma_map_sg implementations
  PCI/P2PDMA: Attempt to set map_type if it has not been set
  lib/scatterlist: add flag for indicating P2PDMA segments in an SGL
  swiotlb: clean up some coding style and minor issues
  dma-mapping: update comment after dmabounce removal
  scsi: sd: Add a comment about limiting max_sectors to shost optimal limit
  ata: libata-scsi: cap ata_device->max_sectors according to shost->max_sectors
  scsi: scsi_transport_sas: cap shost opt_sectors according to DMA optimal limit
  ...
303 lines
7.9 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
 * platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * The Armada 370, 375, 38x and XP SoCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency.
 */

#define pr_fmt(fmt) "mvebu-coherency: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include <asm/dma-mapping.h>
#include "coherency.h"
#include "mvebu-soc-id.h"

unsigned long coherency_phys_base;
void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;
static void __iomem *cpu_config_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET	0x0

enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};

static const struct of_device_id of_coherency_table[] = {
	{.compatible = "marvell,coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
	{.compatible = "marvell,armada-375-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
	{.compatible = "marvell,armada-380-coherency-fabric",
	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
	{ /* end of list */ },
};

/* Functions defined in coherency_ll.S */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);

#define CPU_CONFIG_SHARED_L2 BIT(16)

/*
 * Disable the "Shared L2 Present" bit in the CPU Configuration register
 * on Armada XP.
 *
 * The "Shared L2 Present" bit affects the "level of coherence" value
 * in the clidr CP15 register. Cache operation functions such as
 * "flush all" and "invalidate all" operate on all the cache levels
 * that are included in the defined level of coherence. When HW I/O
 * coherency is used, this bit causes unnecessary flushes of the L2
 * cache.
 */
static void armada_xp_clear_shared_l2(void)
{
	u32 reg;

	if (!cpu_config_base)
		return;

	reg = readl(cpu_config_base);
	reg &= ~CPU_CONFIG_SHARED_L2;
	writel(reg, cpu_config_base);
}

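/*
 * Bus notifier: mark every device added to the monitored bus as
 * DMA-coherent, so the DMA mapping code does not perform cache
 * maintenance for it.
 */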
static int mvebu_hwcc_notifier(struct notifier_block *nb,
			       unsigned long event, void *__dev)
{
	struct device *dev = __dev;

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	dev->dma_coherent = true;

	return NOTIFY_OK;
}

static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
	.notifier_call = mvebu_hwcc_notifier,
};

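/*
 * CPU hotplug "starting" callback: clear the "Shared L2 Present" bit
 * on each CPU as it comes online.
 */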
static int armada_xp_clear_l2_starting(unsigned int cpu)
{
	armada_xp_clear_shared_l2();
	return 0;
}

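/*
 * Armada 370/XP setup: record the physical base of the coherency
 * fabric (read by secondary CPUs before they join the fabric), map
 * its registers, hook the CPU hotplug "starting" step to clear the
 * "Shared L2 Present" bit, and finally add the boot CPU to the
 * coherency fabric.
 */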
static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;
	struct device_node *cpu_config_np;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);

	cpu_config_np = of_find_compatible_node(NULL, NULL,
						"marvell,armada-xp-cpu-config");
	if (!cpu_config_np)
		goto exit;

	cpu_config_base = of_iomap(cpu_config_np, 0);
	if (!cpu_config_base) {
		of_node_put(cpu_config_np);
		goto exit;
	}

	of_node_put(cpu_config_np);

	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
				  "arm/mvebu/coherency:starting",
				  armada_xp_clear_l2_starting, NULL);
exit:
	set_cpu_coherent();
}

/*
 * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
 * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
 * needed for the HW I/O coherency mechanism to work properly without
 * deadlock.
 */
static void __iomem *
armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
			 unsigned int mtype, void *caller)
{
	mtype = MT_UNCACHED;
	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

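/*
 * Armada 375/38x setup: map the coherency registers, force MMIO
 * mappings to MT_UNCACHED via the ioremap hook above, and, when I/O
 * coherency is enabled, flag the PL310 as "arm,io-coherent" in the
 * device tree.
 */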
static void __init armada_375_380_coherency_init(struct device_node *np)
{
	struct device_node *cache_dn;

	coherency_cpu_base = of_iomap(np, 0);
	arch_ioremap_caller = armada_wa_ioremap_caller;
	pci_ioremap_set_mem_type(MT_UNCACHED);

	/*
	 * We should switch the PL310 to I/O coherency mode only if
	 * I/O coherency is actually enabled.
	 */
	if (!coherency_available())
		return;

	/*
	 * Add the PL310 property "arm,io-coherent". This makes sure the
	 * outer sync operation is not used, which allows working around
	 * the system erratum that causes deadlocks when doing PCIe in an
	 * SMP situation on Armada 375 and Armada 38x.
	 */
	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
		struct property *p;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
		of_add_property(cache_dn, p);
	}
}

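/* Return the type of coherency fabric available on this system, if any. */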
static int coherency_type(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	int type;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many prerequisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
	 *   only make sense in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to make the cache
	 * policy set to write-allocate (on all Armada SoCs), and to
	 * set the shareable attribute in page tables (on all Armada
	 * SoCs except the Armada 370). Unfortunately, such decisions
	 * are taken very early in the kernel boot process, at a point
	 * where we don't know yet on which SoC we are running.
	 */
	if (!is_smp())
		return COHERENCY_FABRIC_TYPE_NONE;

	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
	if (!np)
		return COHERENCY_FABRIC_TYPE_NONE;

	type = (int) match->data;

	of_node_put(np);

	return type;
}

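/*
 * Make the calling CPU coherent: on Armada 370/XP, clear the "Shared
 * L2 Present" bit, add the CPU to the SMP group and enable coherency
 * via the low-level helpers from coherency_ll.S.
 */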
int set_cpu_coherent(void)
{
	int type = coherency_type();

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
		if (!coherency_base) {
			pr_warn("Can't make current CPU cache coherent.\n");
			pr_warn("Coherency fabric is not initialized\n");
			return 1;
		}

		armada_xp_clear_shared_l2();
		ll_add_cpu_to_smp_group();
		return ll_enable_coherency();
	}

	return 0;
}

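/* Return nonzero when a coherency fabric is present and usable. */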
int coherency_available(void)
{
	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
}

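/*
 * Entry point: find the coherency fabric node in the device tree and
 * run the SoC-specific initialization for it.
 */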
int __init coherency_init(void)
{
	int type = coherency_type();
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);

	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
		armada_370_coherency_init(np);
	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
		armada_375_380_coherency_init(np);

	of_node_put(np);

	return 0;
}

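/*
 * Register the notifier that marks platform devices as DMA-coherent,
 * once hardware I/O coherency is known to be available.
 */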
static int __init coherency_late_init(void)
{
	if (coherency_available())
		bus_register_notifier(&platform_bus_type,
				      &mvebu_hwcc_nb);
	return 0;
}

postcore_initcall(coherency_late_init);

#if IS_ENABLED(CONFIG_PCI)
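/* Likewise for devices on the PCI bus. */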
static int __init coherency_pci_init(void)
{
	if (coherency_available())
		bus_register_notifier(&pci_bus_type,
				      &mvebu_hwcc_pci_nb);
	return 0;
}

arch_initcall(coherency_pci_init);
#endif