commit a2351efeb3
Merge branches 'pci/misc', 'pci/pm', 'pci/host-designware', 'pci/host-imx6', 'pci/host-keystone', 'pci/host-tegra' and 'pci/host-xilinx' into next

* pci/misc:
  PCI/AER: Make <linux/aer.h> standalone includable
  PCI: Remove unnecessary variable in pci_add_dynid()

* pci/pm:
  PCI/PM: Allow PCI devices to be put into D3cold during system suspend
  PCI/PM: Drop unused runtime PM support code for PCIe ports

* pci/host-designware:
  PCI: designware: Check private_data validity in single place
  PCI: designware: Remove pci_assign_unassigned_resources() from dw_pcie_host_init()
  PCI: designware: Use pci_create_root_bus() instead of pci_scan_root_bus()
  PCI: designware: Parse bus-range property from devicetree
  PCI: designware: Add support for v3.65 hardware

* pci/host-imx6:
  PCI: imx6: Probe in module_init(), not fs_initcall()
  PCI: designware: Remove pci_assign_unassigned_resources() from dw_pcie_host_init()
  PCI: designware: Use pci_create_root_bus() instead of pci_scan_root_bus()
  PCI: designware: Parse bus-range property from devicetree
  PCI: imx6: Put LTSSM in "Detect" state before disabling it
  MAINTAINERS: Add Lucas Stach as co-maintainer for i.MX6 PCI driver
  PCI: designware: Add support for v3.65 hardware

* pci/host-keystone:
  PCI: keystone: Add TI Keystone PCIe driver
  PCI: designware: Add support for v3.65 hardware

* pci/host-tegra:
  PCI: tegra: Implement a proper resource hierarchy
  PCI: tegra: Add missing cleanup in error path and tegra_msi_teardown_irq()
  resources: Add device-managed request/release_resource()

* pci/host-xilinx:
  PCI: xilinx: Add Xilinx AXI PCIe Host Bridge IP driver

Conflicts:
        drivers/pci/host/Kconfig
        drivers/pci/host/Makefile

Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -23,3 +23,6 @@ Required properties:
 
 Optional properties:
 - reset-gpio: gpio pin number of power good signal
+- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
+  specify this property, to keep backwards compatibility a range of 0x00-0xff
+  is assumed if not present)

Documentation/devicetree/bindings/pci/pci-keystone.txt (new file, 68 lines)
@@ -0,0 +1,68 @@
TI Keystone PCIe interface

Keystone PCI host controller is based on Designware PCI h/w version 3.65.
It shares common functions with the PCIe Designware core driver and inherits
common properties defined in
Documentation/devicetree/bindings/pci/designware-pcie.txt

Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
for the details of Designware DT bindings. Additional properties are
described here as well as properties that are not applicable.

Required Properties:-

        compatible: "ti,keystone-pcie"
        reg:    index 1 is the base address and length of DW application registers.
                index 2 is the base address and length of PCI mode configuration
                register.
                index 3 is the base address and length of PCI device ID register.

        pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
                interrupt-cells: should be set to 1
                interrupt-parent: Parent interrupt controller phandle
                interrupts: GIC interrupt lines connected to PCI MSI interrupt lines

        Example:
                pcie_msi_intc: msi-interrupt-controller {
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&gic>;
                        interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>;
                };

        pcie_intc: Interrupt controller device node for Legacy IRQ chip
                interrupt-cells: should be set to 1
                interrupt-parent: Parent interrupt controller phandle
                interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines

        Example:
                pcie_intc: legacy-interrupt-controller {
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&gic>;
                        interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
                                <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
                };

Optional properties:-
        phys: phandle to the generic Keystone SerDes phy for PCI
        phy-names: name of the generic Keystone SerDes phy for PCI
        - If the boot loader already does PCI link establishment, then phys and
          phy-names shouldn't be present.

Designware DT Properties not applicable for Keystone PCI

1. pcie_bus clock-names not used. Instead, a phandle to phys is used.

Note for PCI driver usage
=========================
The driver requires pci=pcie_bus_perf in the bootargs for proper functioning.
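
The eight GIC lines in the MSI example above are interleaved: line n carries MSI
vectors n, n+8, n+16 and n+24. A minimal sketch of that mapping, using a
hypothetical helper name (the driver added by this commit does the same
arithmetic in update_reg_offset_bit_pos()):

        /* Sketch only: split an MSI vector (0..31) into its host line and
         * status bit. Illustrative, not part of the binding or the driver. */
        static void msi_vector_to_host_line(unsigned int vector,
                                            unsigned int *host_line,
                                            unsigned int *bit)
        {
                *host_line = vector % 8;  /* which of the 8 GIC lines fires */
                *bit = vector / 8;        /* bit within that line's status register */
        }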

Documentation/devicetree/bindings/pci/xilinx-pcie.txt (new file, 62 lines)
@@ -0,0 +1,62 @@
* Xilinx AXI PCIe Root Port Bridge DT description

Required properties:
- #address-cells: Address representation for root ports, set to <3>
- #size-cells: Size representation for root ports, set to <2>
- #interrupt-cells: specifies the number of cells needed to encode an
        interrupt source. The value must be 1.
- compatible: Should contain "xlnx,axi-pcie-host-1.00.a"
- reg: Should contain AXI PCIe registers location and length
- device_type: must be "pci"
- interrupts: Should contain AXI PCIe interrupt
- interrupt-map-mask,
  interrupt-map: standard PCI properties to define the mapping of the
        PCI interface to interrupt numbers.
- ranges: ranges for the PCI memory regions (I/O space region is not
        supported by hardware)
        Please refer to the standard PCI bus binding document for a more
        detailed explanation

Optional properties:
- bus-range: PCI bus numbers covered

Interrupt controller child node
+++++++++++++++++++++++++++++++
Required properties:
- interrupt-controller: identifies the node as an interrupt controller
- #address-cells: specifies the number of cells needed to encode an
        address. The value must be 0.
- #interrupt-cells: specifies the number of cells needed to encode an
        interrupt source. The value must be 1.

NOTE:
The core provides a single interrupt for both INTx/MSI messages. So, an
interrupt controller node is created to support the 'interrupt-map' DT
functionality. The driver will create an IRQ domain for this map, decode
the four INTx interrupts in the ISR and route them to this domain.


Example:
++++++++

        pci_express: axi-pcie@50000000 {
                #address-cells = <3>;
                #size-cells = <2>;
                #interrupt-cells = <1>;
                compatible = "xlnx,axi-pcie-host-1.00.a";
                reg = < 0x50000000 0x10000000 >;
                device_type = "pci";
                interrupts = < 0 52 4 >;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map = <0 0 0 1 &pcie_intc 1>,
                                <0 0 0 2 &pcie_intc 2>,
                                <0 0 0 3 &pcie_intc 3>,
                                <0 0 0 4 &pcie_intc 4>;
                ranges = < 0x02000000 0 0x60000000 0x60000000 0 0x10000000 >;

                pcie_intc: interrupt-controller {
                        interrupt-controller;
                        #address-cells = <0>;
                        #interrupt-cells = <1>;
                };
        };
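
A minimal sketch of the decode scheme the NOTE describes, with hypothetical
register and field names (the real driver reads its interrupt FIFO registers
instead; everything named "example_" below is illustrative):

        #include <linux/interrupt.h>
        #include <linux/io.h>
        #include <linux/irq.h>
        #include <linux/irqdomain.h>

        #define EXAMPLE_INTX_STATUS     0x158   /* hypothetical status offset */

        struct example_port {
                void __iomem *regs;
                struct irq_domain *leg_domain;  /* domain backing 'interrupt-map' */
        };

        /* Sketch: fan the bridge's single IRQ out to the four INTx virqs */
        static irqreturn_t example_intx_isr(int irq, void *data)
        {
                struct example_port *port = data;
                u32 status = readl(port->regs + EXAMPLE_INTX_STATUS);
                unsigned int i;

                for (i = 0; i < 4; i++)         /* INTA..INTD */
                        if (status & BIT(i))
                                generic_handle_irq(irq_find_mapping(port->leg_domain,
                                                                    i + 1));
                return IRQ_HANDLED;
        }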

Documentation/driver-model/devres.txt
@@ -264,8 +264,10 @@ IIO

 IO region
   devm_release_mem_region()
   devm_release_region()
+  devm_release_resource()
   devm_request_mem_region()
   devm_request_region()
+  devm_request_resource()

 IOMAP
   devm_ioport_map()
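
devm_request_resource() itself is introduced by this merge (the "resources:
Add device-managed request/release_resource()" commit); a minimal usage
sketch, with hypothetical names:

        #include <linux/device.h>
        #include <linux/ioport.h>

        /* Sketch: claim a child window inside a parent resource the driver
         * already owns; the devres core releases it automatically when the
         * device unbinds. 'parent' and 'window' are illustrative names. */
        static int example_claim_window(struct device *dev,
                                        struct resource *parent,
                                        struct resource *window)
        {
                int err = devm_request_resource(dev, parent, window);

                if (err < 0)
                        return err;
                return 0;
        }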

MAINTAINERS
@@ -6870,12 +6870,19 @@ F:	arch/x86/kernel/quirks.c
 
 PCI DRIVER FOR IMX6
 M:	Richard Zhu <r65037@freescale.com>
 M:	Shawn Guo <shawn.guo@freescale.com>
+M:	Lucas Stach <l.stach@pengutronix.de>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pci/host/*imx6*
 
+PCI DRIVER FOR TI KEYSTONE
+M:	Murali Karicheri <m-karicheri2@ti.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/pci/host/*keystone*
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:	Jason Cooper <jason@lakedaemon.net>

drivers/pci/host/Kconfig
@@ -63,4 +63,23 @@ config PCIE_SPEAR13XX
 	help
 	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
 
+config PCI_KEYSTONE
+	bool "TI Keystone PCIe controller"
+	depends on ARCH_KEYSTONE
+	select PCIE_DW
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCI controller support on Keystone
+	  SoCs. The PCI controller on Keystone is based on Designware hardware
+	  and therefore the driver re-uses the Designware core functions to
+	  implement the driver.
+
+config PCIE_XILINX
+	bool "Xilinx AXI PCIe host bridge support"
+	depends on ARCH_ZYNQ
+	help
+	  Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+	  Host Bridge driver.
+
 endmenu

drivers/pci/host/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o

drivers/pci/host/pci-imx6.c
@@ -49,6 +49,9 @@ struct imx6_pcie {
 
 /* PCIe Port Logic registers (memory-mapped) */
 #define PL_OFFSET 0x700
+#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
 #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)

@@ -214,6 +217,32 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	u32 val, gpr1, gpr12;
+
+	/*
+	 * If the bootloader already enabled the link we need some special
+	 * handling to get the core back into a state where it is safe to
+	 * touch it for configuration. As there is no dedicated reset signal
+	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+	 * state before completely disabling LTSSM, which is a prerequisite
+	 * for core configuration.
+	 *
+	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+	 * indication that the bootloader activated the link.
+	 */
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+	if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+	    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+		val = readl(pp->dbi_base + PCIE_PL_PFLR);
+		val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+		val |= PCIE_PL_PFLR_FORCE_LINK;
+		writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+	}
 
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);

@@ -589,6 +618,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* bring down link, so bootloader gets clean state in case of reboot */
+	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
 static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx6q-pcie", },
 	{},

@@ -601,6 +638,7 @@ static struct platform_driver imx6_pcie_driver = {
 		.owner = THIS_MODULE,
 		.of_match_table = imx6_pcie_of_match,
 	},
+	.shutdown = imx6_pcie_shutdown,
 };
 
 /* Freescale PCIe driver does not allow module unload */

@@ -609,7 +647,7 @@ static int __init imx6_pcie_init(void)
 {
 	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
 }
-fs_initcall(imx6_pcie_init);
+module_init(imx6_pcie_init);
 
 MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
 MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");

drivers/pci/host/pci-keystone-dw.c (new file, 516 lines)
@@ -0,0 +1,516 @@
/*
 * Designware application register space functions for Keystone PCI controller
 *
 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

/* Application register defines */
#define LTSSM_EN_VAL			1
#define LTSSM_STATE_MASK		0x1f
#define LTSSM_STATE_L0			0x11
#define DBI_CS2_EN_VAL			0x20
#define OB_XLAT_EN_VAL			2

/* Application registers */
#define CMD_STATUS			0x004
#define CFG_SETUP			0x008
#define OB_SIZE				0x030
#define CFG_PCIM_WIN_SZ_IDX		3
#define CFG_PCIM_WIN_CNT		32
#define SPACE0_REMOTE_CFG_OFFSET	0x1000
#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
#define OB_OFFSET_HI(n)			(0x204 + (8 * n))

/* IRQ register defines */
#define IRQ_EOI				0x050
#define IRQ_STATUS			0x184
#define IRQ_ENABLE_SET			0x188
#define IRQ_ENABLE_CLR			0x18c

#define MSI_IRQ				0x054
#define MSI0_IRQ_STATUS			0x104
#define MSI0_IRQ_ENABLE_SET		0x108
#define MSI0_IRQ_ENABLE_CLR		0x10c
#define MSI_IRQ_OFFSET			4

/* Config space registers */
#define DEBUG0				0x728

#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

/* Split an MSI vector number into its status-register index and bit position */
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
					     u32 *bit_pos)
{
	*reg_offset = offset % 8;
	*bit_pos = offset >> 3;
}

u32 ks_dw_pcie_get_msi_data(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	return ks_pcie->app.start + MSI_IRQ;
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending, vector;
	int src, virq;

	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));

	/*
	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
	 * show vectors 1, 9, 17, 25; and so forth
	 */
	for (src = 0; src < 4; src++) {
		if (BIT(src) & pending) {
			vector = offset + (src << 3);
			virq = irq_linear_revmap(pp->irq_domain, vector);
			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
				src, vector, virq);
			generic_handle_irq(virq);
		}
	}
}

static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	u32 reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask the end point if PVM implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			mask_msi_irq(d);
	}

	ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
	struct keystone_pcie *ks_pcie;
	unsigned int irq = d->irq;
	struct msi_desc *msi;
	struct pcie_port *pp;
	u32 offset;

	msi = irq_get_msi_desc(irq);
	pp = sys_to_pcie(msi->dev->bus->sysdata);
	ks_pcie = to_keystone_pcie(pp);
	offset = irq - irq_linear_revmap(pp->irq_domain, 0);

	/* Mask the end point if PVM implemented */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (msi->msi_attrib.maskbit)
			unmask_msi_irq(d);
	}

	ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	int i;

	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
					       MAX_MSI_IRQS,
					       &ks_dw_pcie_msi_domain_ops,
					       chip);
	if (!pp->irq_domain) {
		dev_err(pp->dev, "irq domain init failed\n");
		return -ENXIO;
	}

	for (i = 0; i < MAX_MSI_IRQS; i++)
		irq_create_mapping(pp->irq_domain, i);

	return 0;
}

void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
	int i;

	for (i = 0; i < MAX_LEGACY_IRQS; i++)
		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
}

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 pending;
	int virq;

	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));

	if (BIT(0) & pending) {
		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
			virq);
		generic_handle_irq(virq);
	}

	/* EOI the INTx interrupt */
	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}

/* Legacy INTx lines need no chip-level ack/mask/unmask operations */
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, d->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @reg_virt: application register base (virtual address)
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 * @reg_virt: application register base (virtual address)
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}

void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	u32 start = pp->mem.start, end = pp->mem.end;
	int i, tr_size;

	/* Disable BARs for inbound access */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/* Set outbound translation size per window division */
	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);

	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
		start += tr_size;
	}

	/* Enable OB translation */
	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
	       ks_pcie->va_app_base + CMD_STATUS);
}

/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device is residing on
 * @devfn: device, function number info
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0. Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions for accessing configuration - local and
 * remote. We access the local region for bus 0 (as the RC is attached on bus
 * 0) and the remote region for everything else, using TYPE 1 accesses when
 * bus > 1. Devices on bus 1 get TYPE 0 accesses, since bus 1 is our logical
 * secondary bus. CFG_SETUP is needed only for remote configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
				       unsigned int devfn)
{
	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 regval;

	if (bus == 0)
		return pp->dbi_base;

	regval = (bus << 16) | (device << 8) | function;

	/*
	 * Bus 1 is our virtual secondary bus and needs TYPE 0 config
	 * access; all buses beyond it need TYPE 1 (bit 24).
	 */
	if (bus != 1)
		regval |= BIT(24);

	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
	return pp->va_cfg0_base;
}
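
For illustration, the CFG_SETUP encoding computed above, worked for a
hypothetical target at bus 2, device 3, function 1 (example code, not part of
the driver):

	/* Worked example: bus 2, device 3, function 1.
	 * bus != 1, so it is a TYPE 1 access and bit 24 gets set. */
	static u32 example_cfg_setup_val(void)
	{
		u32 regval = (2 << 16) | (3 << 8) | 1;	/* 0x00020301 */

		regval |= BIT(24);			/* 0x01020301 */
		return regval;
	}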
int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 *val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			     unsigned int devfn, int where, int size, u32 val)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u8 bus_num = bus->number;
	void __iomem *addr;

	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

	return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
}

/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for the MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);

	/* Enable BAR0 */
	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);

	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	/*
	 * For BAR0, just setting bus address for inbound writes (MSI) should
	 * be sufficient. Use physical address to avoid any conflicts.
	 */
	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}

/**
 * ks_dw_pcie_link_up() - Check if link up
 */
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val = readl(pp->dbi_base + DEBUG0);

	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
	u32 val;

	/* Disable Link training */
	val = readl(ks_pcie->va_app_base + CMD_STATUS);
	val &= ~LTSSM_EN_VAL;
	writel(val, ks_pcie->va_app_base + CMD_STATUS);

	/* Initiate Link Training */
	val = readl(ks_pcie->va_app_base + CMD_STATUS);
	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
}

/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/*
	 * Set va_cfg0_base and va_cfg1_base to the same address; both are
	 * used by the rd/wr_other_conf functions above.
	 */
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->app = *res;
	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					      MAX_LEGACY_IRQS,
					      &ks_dw_pcie_legacy_irq_domain_ops,
					      NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	return dw_pcie_host_init(pp);
}

drivers/pci/host/pci-keystone.c (new file, 386 lines)
@@ -0,0 +1,386 @@
/*
 * PCIe host controller driver for Texas Instruments Keystone SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 * Implementation based on pci-exynos.c and pcie-designware.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/resource.h>
#include <linux/signal.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

#define DRIVER_NAME	"keystone-pcie"

/* driver specific constants */
#define MAX_MSI_HOST_IRQS	8
#define MAX_LEGACY_HOST_IRQS	4

/* RC mode settings masks */
#define PCIE_RC_MODE		BIT(2)
#define PCIE_MODE_MASK		(BIT(1) | BIT(2))

/* DEV_STAT_CTRL */
#define PCIE_CAP_BASE		0x70

#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
	struct pcie_port *pp = &ks_pcie->pp;
	int count = 200;

	dw_pcie_setup_rc(pp);

	if (dw_pcie_link_up(pp)) {
		dev_err(pp->dev, "Link already up\n");
		return 0;
	}

	ks_dw_pcie_initiate_link_train(ks_pcie);
	/* check if the link is up or not */
	while (!dw_pcie_link_up(pp)) {
		usleep_range(100, 1000);
		if (--count) {
			ks_dw_pcie_initiate_link_train(ks_pcie);
			continue;
		}
		dev_err(pp->dev, "phy link never came up\n");
		return -EINVAL;
	}

	return 0;
}

static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	u32 offset = irq - ks_pcie->msi_host_irqs[0];
	struct pcie_port *pp = &ks_pcie->pp;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
	chained_irq_exit(chip, desc);
}

/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @irq: IRQ line for legacy interrupts
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct pcie_port *pp = &ks_pcie->pp;
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}

static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
					   char *controller, int *num_irqs)
{
	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
	struct device *dev = ks_pcie->pp.dev;
	struct device_node *np_pcie = dev->of_node, **np_temp;

	if (!strcmp(controller, "msi-interrupt-controller"))
		legacy = 0;

	if (legacy) {
		np_temp = &ks_pcie->legacy_intc_np;
		max_host_irqs = MAX_LEGACY_HOST_IRQS;
		host_irqs = &ks_pcie->legacy_host_irqs[0];
	} else {
		np_temp = &ks_pcie->msi_intc_np;
		max_host_irqs = MAX_MSI_HOST_IRQS;
		host_irqs = &ks_pcie->msi_host_irqs[0];
	}

	/* interrupt controller is in a child node */
	*np_temp = of_find_node_by_name(np_pcie, controller);
	if (!(*np_temp)) {
		dev_err(dev, "Node for %s is absent\n", controller);
		goto out;
	}
	temp = of_irq_count(*np_temp);
	if (!temp)
		goto out;
	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %u\n",
			 (legacy ? "legacy" : "MSI"), temp);

	/*
	 * support up to max_host_irqs; in DT these sit at index 0 to 3
	 * (legacy) or 0 to 7 (MSI)
	 */
	for (temp = 0; temp < max_host_irqs; temp++) {
		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		if (!host_irqs[temp])
			break;
	}
	if (temp) {
		*num_irqs = temp;
		ret = 0;
	}
out:
	return ret;
}

static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
	int i;

	/* Legacy IRQ */
	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
					ks_pcie_legacy_irq_handler);
	}
	ks_dw_pcie_enable_legacy_irqs(ks_pcie);

	/* MSI IRQ */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
			irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
						ks_pcie_msi_irq_handler);
			irq_set_handler_data(ks_pcie->msi_host_irqs[i],
					     ks_pcie);
		}
	}
}

/*
 * When a PCI device does not exist during config cycles, the keystone host
 * gets a bus error instead of returning 0xffffffff. This handler always
 * returns 0 for this kind of fault.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;

		/* fake an all-ones read result and skip the faulting load */
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	return 0;
}

static void __init ks_pcie_host_init(struct pcie_port *pp)
{
	u32 vendor_device_id, val;
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	ks_pcie_establish_link(ks_pcie);
	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
	       pp->dbi_base + PCI_IO_BASE);

	/* update the Device ID from the SoC's PCI ID register */
	vendor_device_id = readl(ks_pcie->va_reg_pciid);
	writew((vendor_device_id >> 16), pp->dbi_base + PCI_DEVICE_ID);

	/* update the DEV_STAT_CTRL to publish right mrrs */
	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	/* set the mrrs to 256 bytes */
	val |= BIT(12);
	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
}

static struct pcie_host_ops keystone_pcie_host_ops = {
	.rd_other_conf = ks_dw_pcie_rd_other_conf,
	.wr_other_conf = ks_dw_pcie_wr_other_conf,
	.link_up = ks_dw_pcie_link_up,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_dw_pcie_msi_set_irq,
	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
	.get_msi_data = ks_dw_pcie_get_msi_data,
	.msi_host_init = ks_dw_pcie_msi_host_init,
	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};

static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
				   struct platform_device *pdev)
{
	struct pcie_port *pp = &ks_pcie->pp;
	int ret;

	ret = ks_pcie_get_irq_controller_info(ks_pcie,
					      "legacy-interrupt-controller",
					      &ks_pcie->num_legacy_host_irqs);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = ks_pcie_get_irq_controller_info(ks_pcie,
						      "msi-interrupt-controller",
						      &ks_pcie->num_msi_host_irqs);
		if (ret)
			return ret;
	}

	pp->root_bus_nr = -1;
	pp->ops = &keystone_pcie_host_ops;
	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize host\n");
		return ret;
	}

	return ret;
}

static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, ks_pcie_of_match);

static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);

	clk_disable_unprepare(ks_pcie->clk);

	return 0;
}

static int __init ks_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct keystone_pcie *ks_pcie;
	struct pcie_port *pp;
	struct resource *res;
	void __iomem *reg_p;
	struct phy *phy;
	int ret = 0;
	u32 val;

	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
			       GFP_KERNEL);
	if (!ks_pcie) {
		dev_err(dev, "no memory for keystone pcie\n");
		return -ENOMEM;
	}
	pp = &ks_pcie->pp;

	/* index 2 is the devcfg register for RC mode settings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	reg_p = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg_p))
		return PTR_ERR(reg_p);

	/* enable RC mode in devcfg */
	val = readl(reg_p);
	val &= ~PCIE_MODE_MASK;
	val |= PCIE_RC_MODE;
	writel(val, reg_p);

	/* initialize SerDes Phy if present */
	phy = devm_phy_get(dev, "pcie-phy");
	if (!IS_ERR_OR_NULL(phy)) {
		ret = phy_init(phy);
		if (ret < 0)
			return ret;
	}

	/* index 3 is to read PCI DEVICE_ID */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	reg_p = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg_p))
		return PTR_ERR(reg_p);
	ks_pcie->va_reg_pciid = reg_p;

	pp->dev = dev;
	platform_set_drvdata(pdev, ks_pcie);
	ks_pcie->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(ks_pcie->clk)) {
		dev_err(dev, "Failed to get pcie rc clock\n");
		return PTR_ERR(ks_pcie->clk);
	}
	ret = clk_prepare_enable(ks_pcie->clk);
	if (ret)
		return ret;

	ret = ks_add_pcie_port(ks_pcie, pdev);
	if (ret < 0)
		goto fail_clk;

	return 0;
fail_clk:
	clk_disable_unprepare(ks_pcie->clk);

	return ret;
}

static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};

module_platform_driver(ks_pcie_driver);

MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
MODULE_DESCRIPTION("Keystone PCIe host controller driver");
MODULE_LICENSE("GPL v2");

drivers/pci/host/pci-keystone.h (new file, 58 lines)
@@ -0,0 +1,58 @@
/*
 * Keystone PCI Controller's common includes
 *
 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
 *		http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define MAX_LEGACY_IRQS			4
#define MAX_MSI_HOST_IRQS		8
#define MAX_LEGACY_HOST_IRQS		4

struct keystone_pcie {
	struct clk		*clk;
	struct pcie_port	pp;
	void __iomem		*va_reg_pciid;

	int			num_legacy_host_irqs;
	int			legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
	struct device_node	*legacy_intc_np;

	int			num_msi_host_irqs;
	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
	struct device_node	*msi_intc_np;
	struct irq_domain	*legacy_irq_domain;

	/* Application register space */
	void __iomem		*va_app_base;
	struct resource		app;
};

/* Keystone DW specific MSI controller APIs/definitions */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
u32 ks_dw_pcie_get_msi_data(struct pcie_port *pp);

/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
int  ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
			struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		unsigned int devfn, int where, int size, u32 val);
int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		unsigned int devfn, int where, int size, u32 *val);
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
int  ks_dw_pcie_link_up(struct pcie_port *pp);
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
int  ks_dw_pcie_msi_host_init(struct pcie_port *pp,
		struct msi_chip *chip);

drivers/pci/host/pci-tegra.c
@@ -253,6 +253,7 @@ struct tegra_pcie {
 	struct list_head buses;
 	struct resource *cs;
 
+	struct resource all;
 	struct resource io;
 	struct resource mem;
 	struct resource prefetch;

@@ -626,6 +627,15 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
+	int err;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+	if (err < 0)
+		return err;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+	if (err)
+		return err;
 
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,

@@ -1170,8 +1180,10 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 		return hwirq;
 
 	irq = irq_create_mapping(msi->domain, hwirq);
-	if (!irq)
+	if (!irq) {
+		tegra_msi_free(msi, hwirq);
 		return -EINVAL;
+	}
 
 	irq_set_msi_desc(irq, desc);

@@ -1189,8 +1201,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
 {
 	struct tegra_msi *msi = to_tegra_msi(chip);
 	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	tegra_msi_free(msi, d->hwirq);
 	irq_dispose_mapping(irq);
+	tegra_msi_free(msi, hwirq);
 }
 
 static struct irq_chip tegra_msi_irq_chip = {

@@ -1514,6 +1528,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 	struct resource res;
 	int err;
 
+	memset(&pcie->all, 0, sizeof(pcie->all));
+	pcie->all.flags = IORESOURCE_MEM;
+	pcie->all.name = np->full_name;
+	pcie->all.start = ~0;
+	pcie->all.end = 0;
+
 	if (of_pci_range_parser_init(&parser, np)) {
 		dev_err(pcie->dev, "missing \"ranges\" property\n");
 		return -EINVAL;

@@ -1525,21 +1545,31 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		switch (res.flags & IORESOURCE_TYPE_BITS) {
 		case IORESOURCE_IO:
 			memcpy(&pcie->io, &res, sizeof(res));
-			pcie->io.name = "I/O";
+			pcie->io.name = np->full_name;
 			break;
 
 		case IORESOURCE_MEM:
 			if (res.flags & IORESOURCE_PREFETCH) {
 				memcpy(&pcie->prefetch, &res, sizeof(res));
-				pcie->prefetch.name = "PREFETCH";
+				pcie->prefetch.name = "prefetchable";
 			} else {
 				memcpy(&pcie->mem, &res, sizeof(res));
-				pcie->mem.name = "MEM";
+				pcie->mem.name = "non-prefetchable";
 			}
 			break;
 		}
+
+		if (res.start <= pcie->all.start)
+			pcie->all.start = res.start;
+
+		if (res.end >= pcie->all.end)
+			pcie->all.end = res.end;
 	}
 
+	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
+	if (err < 0)
+		return err;
+
 	err = of_pci_parse_bus_range(np, &pcie->busn);
 	if (err < 0) {
 		dev_err(pcie->dev, "failed to parse ranges property: %d\n",

drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@ static unsigned long global_io_offset;
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
+	BUG_ON(!sys->private_data);
+
 	return sys->private_data;
 }
 
@@ -261,11 +263,6 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 	int irq, pos0, pos1, i;
 	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	pos0 = find_first_zero_bit(pp->msi_irq_in_use,
			MAX_MSI_IRQS);
 	if (pos0 % no_irqs) {

@@ -326,10 +323,6 @@ static void clear_irq(unsigned int irq)
 	/* get the port structure */
 	msi = irq_data_get_msi(data);
 	pp = sys_to_pcie(msi->dev->bus->sysdata);
-	if (!pp) {
-		BUG();
-		return;
-	}
 
 	/* undo what was done in assign_irq */
 	pos = data->hwirq;

@@ -350,11 +343,6 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 	struct msi_msg msg;
 	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
			&msg_ctr);
 	msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;

@@ -425,7 +413,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	struct resource *cfg_res;
 	u32 val, na, ns;
 	const __be32 *addrp;
-	int i, index;
+	int i, index, ret;
 
 	/* Find the address cell size and the number of cells in order to get
 	 * the untranslated address.

@@ -500,6 +488,16 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 		}
 	}
 
+	ret = of_pci_parse_bus_range(np, &pp->busn);
+	if (ret < 0) {
+		pp->busn.name = np->name;
+		pp->busn.start = 0;
+		pp->busn.end = 0xff;
+		pp->busn.flags = IORESOURCE_BUS;
+		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+			ret, &pp->busn);
+	}
+
 	if (!pp->dbi_base) {
 		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
					resource_size(&pp->cfg));

@@ -511,17 +509,24 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 
 	pp->mem_base = pp->mem.start;
 
-	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
-					pp->config.cfg0_size);
 	if (!pp->va_cfg0_base) {
-		dev_err(pp->dev, "error with ioremap in function\n");
-		return -ENOMEM;
+		pp->cfg0_base = pp->cfg.start;
+		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+						pp->config.cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(pp->dev, "error with ioremap in function\n");
+			return -ENOMEM;
+		}
 	}
-	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
-					pp->config.cfg1_size);
 
 	if (!pp->va_cfg1_base) {
-		dev_err(pp->dev, "error with ioremap\n");
-		return -ENOMEM;
+		pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+						pp->config.cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(pp->dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
 	}
 
 	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {

@@ -530,16 +535,22 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
-					MAX_MSI_IRQS, &msi_domain_ops,
-					&dw_pcie_msi_chip);
-		if (!pp->irq_domain) {
-			dev_err(pp->dev, "irq domain init failed\n");
-			return -ENXIO;
-		}
+		if (!pp->ops->msi_host_init) {
+			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+						MAX_MSI_IRQS, &msi_domain_ops,
+						&dw_pcie_msi_chip);
+			if (!pp->irq_domain) {
+				dev_err(pp->dev, "irq domain init failed\n");
+				return -ENXIO;
+			}
 
-		for (i = 0; i < MAX_MSI_IRQS; i++)
-			irq_create_mapping(pp->irq_domain, i);
+			for (i = 0; i < MAX_MSI_IRQS; i++)
+				irq_create_mapping(pp->irq_domain, i);
+		} else {
+			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	if (pp->ops->host_init)

@@ -558,7 +569,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
 	dw_pci.private_data = (void **)&pp;
 
 	pci_common_init_dev(pp->dev, &dw_pci);
-	pci_assign_unassigned_resources();
 #ifdef CONFIG_PCI_DOMAINS
 	dw_pci.domain++;
 #endif

@@ -707,11 +717,6 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;

@@ -736,11 +741,6 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -768,9 +768,6 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
 
 	pp = sys_to_pcie(sys);
 
-	if (!pp)
-		return 0;
-
 	if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
 		sys->io_offset = global_io_offset - pp->config.io_bus_addr;
 		pci_ioremap_io(global_io_offset, pp->io_base);

@@ -781,6 +778,7 @@ static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
 
 	sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
 	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+	pci_add_resource(&sys->resources, &pp->busn);
 
 	return 1;
 }

@@ -790,14 +788,16 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 	struct pci_bus *bus;
 	struct pcie_port *pp = sys_to_pcie(sys);
 
-	if (pp) {
-		pp->root_bus_nr = sys->busnr;
-		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
-					sys, &sys->resources);
-	} else {
-		bus = NULL;
-		BUG();
-	}
+	pp->root_bus_nr = sys->busnr;
+	bus = pci_create_root_bus(pp->dev, sys->busnr,
+				  &dw_pcie_ops, sys, &sys->resources);
+	if (!bus)
+		return NULL;
+
+	pci_scan_child_bus(bus);
+
+	if (bus && pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
 
 	return bus;
 }

drivers/pci/host/pcie-designware.h
@@ -48,6 +48,7 @@ struct pcie_port {
 	struct resource		cfg;
 	struct resource		io;
 	struct resource		mem;
+	struct resource		busn;
 	struct pcie_port_info	config;
 	int			irq;
 	u32			lanes;

@@ -74,6 +75,8 @@ struct pcie_host_ops {
 	void (*msi_set_irq)(struct pcie_port *pp, int irq);
 	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
 	u32 (*get_msi_data)(struct pcie_port *pp);
+	void (*scan_bus)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
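
The two new hooks let a platform driver take over bus scanning and MSI setup;
the Keystone driver earlier in this commit is the first user. Condensed from
its ops table in pci-keystone.c above:

	/* v3.65-era hooks wired up through struct pcie_host_ops */
	static struct pcie_host_ops example_host_ops = {
		.rd_other_conf	= ks_dw_pcie_rd_other_conf,
		.wr_other_conf	= ks_dw_pcie_wr_other_conf,
		.msi_host_init	= ks_dw_pcie_msi_host_init,	/* replaces the core's MSI domain setup */
		.scan_bus	= ks_dw_pcie_v3_65_scan_bus,	/* runs after pci_scan_child_bus() */
	};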

drivers/pci/host/pcie-xilinx.c (new file, 970 lines)
@@ -0,0 +1,970 @@
|
||||
/*
 * PCIe host controller driver for Xilinx AXI PCIe Bridge
 *
 * Copyright (c) 2012 - 2014 Xilinx, Inc.
 *
 * Based on the Tegra PCIe driver
 *
 * Bits taken from Synopsys Designware Host controller driver and
 * ARM PCI Host generic driver.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

/* Register definitions */
#define XILINX_PCIE_REG_BIR		0x00000130
#define XILINX_PCIE_REG_IDR		0x00000138
#define XILINX_PCIE_REG_IMR		0x0000013c
#define XILINX_PCIE_REG_PSCR		0x00000144
#define XILINX_PCIE_REG_RPSC		0x00000148
#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
#define XILINX_PCIE_REG_MSIBASE2	0x00000150
#define XILINX_PCIE_REG_RPEFR		0x00000154
#define XILINX_PCIE_REG_RPIFR1		0x00000158
#define XILINX_PCIE_REG_RPIFR2		0x0000015c

/* Interrupt registers definitions */
#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
#define XILINX_PCIE_INTR_FATAL		BIT(11)
#define XILINX_PCIE_INTR_INTX		BIT(16)
#define XILINX_PCIE_INTR_MSI		BIT(17)
#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF

/* Root Port Error FIFO Read Register definitions */
#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Interrupt FIFO Read Register 1 definitions */
#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27

/* Bridge Info Register definitions */
#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16

/* Root Port Interrupt FIFO Read Register 2 definitions */
#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)

/* ECAM definitions */
#define ECAM_BUS_NUM_SHIFT		20
#define ECAM_DEV_NUM_SHIFT		12

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS		128

/* Number of Memory Resources */
#define XILINX_MAX_NUM_RESOURCES	3

/**
 * struct xilinx_pcie_port - PCIe port information
 * @reg_base: IO Mapped Register Base
 * @irq: Interrupt number
 * @msi_pages: MSI pages
 * @root_busno: Root Bus number
 * @dev: Device pointer
 * @irq_domain: IRQ domain pointer
 * @bus_range: Bus range
 * @resources: Bus Resources
 */
struct xilinx_pcie_port {
	void __iomem *reg_base;
	u32 irq;
	unsigned long msi_pages;
	u8 root_busno;
	struct device *dev;
	struct irq_domain *irq_domain;
	struct resource bus_range;
	struct list_head resources;
};

static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);

static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->reg_base + reg);
}

static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
{
	return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
		XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
}

/**
 * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
 * @port: PCIe port information
 */
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
	u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);

	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %d\n",
			val & XILINX_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
			   XILINX_PCIE_REG_RPEFR);
	}
}
/**
 * xilinx_pcie_valid_device - Check if a valid device is present on bus
 * @bus: PCI Bus structure
 * @devfn: device/function
 *
 * Return: 'true' if the device is valid, 'false' otherwise
 */
static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);

	/* Check if link is up when trying to access downstream ports */
	if (bus->number != port->root_busno)
		if (!xilinx_pcie_link_is_up(port))
			return false;

	/* Only one device down on each root port */
	if (bus->number == port->root_busno && devfn > 0)
		return false;

	/*
	 * Do not read more than one device on the bus directly attached
	 * to the Root Complex.
	 */
	if (bus->primary == port->root_busno && devfn > 0)
		return false;

	return true;
}
/**
 * xilinx_pcie_config_base - Get configuration base
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Address of the requested configuration space register.
 */
static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
					     unsigned int devfn, int where)
{
	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
	int relbus;

	relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
		 (devfn << ECAM_DEV_NUM_SHIFT);

	return port->reg_base + relbus + where;
}
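The address computed by xilinx_pcie_config_base() follows the ECAM layout: 1 MiB per bus (bus << 20) and 4 KiB per device/function (devfn << 12), with the register offset in the low 12 bits. A small worked example with illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	#define ECAM_BUS_NUM_SHIFT	20
	#define ECAM_DEV_NUM_SHIFT	12

	int main(void)
	{
		uint32_t bus = 1, devfn = 0;	/* devfn = (dev << 3) | fn */
		int where = 0x10;		/* BAR0 */

		uint32_t off = (bus << ECAM_BUS_NUM_SHIFT) |
			       (devfn << ECAM_DEV_NUM_SHIFT) | where;

		printf("config offset = 0x%x\n", off);	/* prints 0x100010 */
		return 0;
	}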
/**
 * xilinx_pcie_read_config - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	void __iomem *addr;

	if (!xilinx_pcie_valid_device(bus, devfn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	addr = xilinx_pcie_config_base(bus, devfn, where);

	switch (size) {
	case 1:
		*val = readb(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	default:
		*val = readl(addr);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/**
 * xilinx_pcie_write_config - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	void __iomem *addr;

	if (!xilinx_pcie_valid_device(bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = xilinx_pcie_config_base(bus, devfn, where);

	switch (size) {
	case 1:
		writeb(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	default:
		writel(val, addr);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* PCIe operations */
static struct pci_ops xilinx_pcie_ops = {
	.read  = xilinx_pcie_read_config,
	.write = xilinx_pcie_write_config,
};
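These ops are not called directly by endpoint drivers; the PCI core dispatches every config access through the pci_ops attached to the bus. A simplified sketch of the dispatch path (the in-tree pci_bus_read_config_*() wrappers additionally take a spinlock and validate the access size):

	static int sketch_read_config_dword(struct pci_bus *bus,
					    unsigned int devfn, int pos,
					    u32 *value)
	{
		/* lands in xilinx_pcie_read_config() on this host bridge */
		return bus->ops->read(bus, devfn, pos, 4, value);
	}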
/* MSI functions */

/**
 * xilinx_pcie_destroy_msi - Free MSI number
 * @irq: IRQ to be freed
 */
static void xilinx_pcie_destroy_msi(unsigned int irq)
{
	struct irq_desc *desc;
	struct msi_desc *msi;
	struct xilinx_pcie_port *port;

	desc = irq_to_desc(irq);
	msi = irq_desc_get_msi_desc(desc);
	port = sys_to_pcie(msi->dev->bus->sysdata);

	if (!test_bit(irq, msi_irq_in_use))
		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
	else
		clear_bit(irq, msi_irq_in_use);
}
/**
 * xilinx_pcie_assign_msi - Allocate MSI number
 * @port: PCIe port structure
 *
 * Return: A valid IRQ on success and error value on failure.
 */
static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
{
	int pos;

	pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
	if (pos < XILINX_NUM_MSI_IRQS)
		set_bit(pos, msi_irq_in_use);
	else
		return -ENOSPC;

	return pos;
}
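This is the usual bitmap-allocator idiom: scan for a free bit, then claim it. As written, find_first_zero_bit() plus set_bit() is not one atomic step, so two concurrent MSI setups could in principle pick the same position. A race-free variant of the same pattern would retry with test_and_set_bit(); this is an illustration of the idiom, not a change the driver makes:

	static int assign_msi_atomic(unsigned long *bitmap, int nbits)
	{
		int pos;

		do {
			pos = find_first_zero_bit(bitmap, nbits);
			if (pos >= nbits)
				return -ENOSPC;
			/* retry if the bit was claimed between scan and set */
		} while (test_and_set_bit(pos, bitmap));

		return pos;
	}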
/**
 * xilinx_msi_teardown_irq - Destroy the MSI
 * @chip: MSI Chip descriptor
 * @irq: MSI IRQ to destroy
 */
static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	xilinx_pcie_destroy_msi(irq);
}
/**
 * xilinx_pcie_msi_setup_irq - Setup MSI request
 * @chip: MSI chip pointer
 * @pdev: PCIe device pointer
 * @desc: MSI descriptor pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
				     struct pci_dev *pdev,
				     struct msi_desc *desc)
{
	struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
	unsigned int irq;
	int hwirq;
	struct msi_msg msg;
	phys_addr_t msg_addr;

	hwirq = xilinx_pcie_assign_msi(port);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(port->irq_domain, hwirq);
	if (!irq)
		return -EINVAL;

	irq_set_msi_desc(irq, desc);

	msg_addr = virt_to_phys((void *)port->msi_pages);

	msg.address_hi = 0;
	msg.address_lo = msg_addr;
	msg.data = irq;

	write_msi_msg(irq, &msg);

	return 0;
}
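Note the encoding chosen above: the MSI data is the Linux virq itself, which is why xilinx_pcie_intr_handler() can feed the value read from RPIFR2 straight into generic_handle_irq(). From the endpoint's point of view, raising the interrupt is just a posted 32-bit write of that data to the programmed address (a conceptual sketch, not driver code):

	/* Endpoint-side view of the MSI programmed above */
	static void endpoint_signal_msi(void __iomem *msi_addr, u32 msi_data)
	{
		writel(msi_data, msi_addr);	/* data == virq here */
	}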
/* MSI Chip Descriptor */
static struct msi_chip xilinx_pcie_msi_chip = {
	.setup_irq = xilinx_pcie_msi_setup_irq,
	.teardown_irq = xilinx_msi_teardown_irq,
};

/* HW Interrupt Chip Descriptor */
static struct irq_chip xilinx_msi_irq_chip = {
	.name = "Xilinx PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
/**
 * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

/* IRQ Domain operations */
static const struct irq_domain_ops msi_domain_ops = {
	.map = xilinx_pcie_msi_map,
};
/**
 * xilinx_pcie_enable_msi - Enable MSI support
 * @port: PCIe port information
 */
static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
	phys_addr_t msg_addr;

	/*
	 * The page only serves as a decode target for inbound MSI
	 * writes; its contents are never read.
	 */
	port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
	msg_addr = virt_to_phys((void *)port->msi_pages);
	pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
	pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
}
/**
 * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
 * @bus: PCIe bus
 */
static void xilinx_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);

		xilinx_pcie_msi_chip.dev = port->dev;
		bus->msi = &xilinx_pcie_msi_chip;
	}
}
/* INTx Functions */

/**
 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pcie_intx_map,
};

/* PCIe HW Functions */
/**
 * xilinx_pcie_intr_handler - Interrupt Service Handler
 * @irq: IRQ number
 * @data: PCIe port information
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
	u32 val, mask, status, msi_data;

	/* Read interrupt decode and mask registers */
	val = pcie_read(port, XILINX_PCIE_REG_IDR);
	mask = pcie_read(port, XILINX_PCIE_REG_IMR);

	status = val & mask;
	if (!status)
		return IRQ_NONE;

	if (status & XILINX_PCIE_INTR_LINK_DOWN)
		dev_warn(port->dev, "Link Down\n");

	if (status & XILINX_PCIE_INTR_ECRC_ERR)
		dev_warn(port->dev, "ECRC failed\n");

	if (status & XILINX_PCIE_INTR_STR_ERR)
		dev_warn(port->dev, "Streaming error\n");

	if (status & XILINX_PCIE_INTR_HOT_RESET)
		dev_info(port->dev, "Hot reset\n");

	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
		dev_warn(port->dev, "ECAM access timeout\n");

	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
		dev_warn(port->dev, "Correctable error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_NONFATAL) {
		dev_warn(port->dev, "Non fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_FATAL) {
		dev_warn(port->dev, "Fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_INTX) {
		/* INTx interrupt received */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		/* Check whether interrupt valid */
		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		/* Clear interrupt FIFO register 1 */
		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
			   XILINX_PCIE_REG_RPIFR1);

		/* Handle INTx Interrupt */
		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
		       XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
	}

	if (status & XILINX_PCIE_INTR_MSI) {
		/* MSI Interrupt */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
				   XILINX_PCIE_RPIFR2_MSG_DATA;

			/* Clear interrupt FIFO register 1 */
			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
				   XILINX_PCIE_REG_RPIFR1);

			if (IS_ENABLED(CONFIG_PCI_MSI)) {
				/* Handle MSI Interrupt */
				generic_handle_irq(msi_data);
			}
		}
	}

	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
		dev_warn(port->dev, "Slave unsupported request\n");

	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
		dev_warn(port->dev, "Slave unexpected completion\n");

	if (status & XILINX_PCIE_INTR_SLV_COMPL)
		dev_warn(port->dev, "Slave completion timeout\n");

	if (status & XILINX_PCIE_INTR_SLV_ERRP)
		dev_warn(port->dev, "Slave Error Poison\n");

	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
		dev_warn(port->dev, "Slave Completer Abort\n");

	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
		dev_warn(port->dev, "Slave Illegal Burst\n");

	if (status & XILINX_PCIE_INTR_MST_DECERR)
		dev_warn(port->dev, "Master decode error\n");

	if (status & XILINX_PCIE_INTR_MST_SLVERR)
		dev_warn(port->dev, "Master slave error\n");

	if (status & XILINX_PCIE_INTR_MST_ERRP)
		dev_warn(port->dev, "Master error poison\n");

	/* Clear the Interrupt Decode register */
	pcie_write(port, status, XILINX_PCIE_REG_IDR);

	return IRQ_HANDLED;
}
/**
 * xilinx_pcie_free_irq_domain - Free IRQ domain
 * @port: PCIe port information
 */
static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
{
	int i;
	u32 irq, num_irqs;

	/* Free IRQ Domain */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		free_pages(port->msi_pages, 0);
		num_irqs = XILINX_NUM_MSI_IRQS;
	} else {
		/* INTx */
		num_irqs = 4;
	}

	for (i = 0; i < num_irqs; i++) {
		irq = irq_find_mapping(port->irq_domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(port->irq_domain);
}
/**
 * xilinx_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
						 &intx_domain_ops,
						 port);
	if (!port->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	/* Setup MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * port->irq_domain is reused: the MSI domain replaces the
		 * INTx one created just above.
		 */
		port->irq_domain = irq_domain_add_linear(node,
							 XILINX_NUM_MSI_IRQS,
							 &msi_domain_ops,
							 &xilinx_pcie_msi_chip);
		if (!port->irq_domain) {
			dev_err(dev, "Failed to get a MSI IRQ domain\n");
			return -ENODEV;
		}

		xilinx_pcie_enable_msi(port);
	}

	return 0;
}
/**
 * xilinx_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
	if (xilinx_pcie_link_is_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
		   XILINX_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
			 XILINX_PCIE_IMR_ALL_MASK,
		   XILINX_PCIE_REG_IDR);

	/* Enable all interrupts */
	pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);

	/* Enable the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
			 XILINX_PCIE_REG_RPSC_BEN,
		   XILINX_PCIE_REG_RPSC);
}
/**
 * xilinx_pcie_setup - Setup memory resources
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Return: '1' on success and error value on failure
 */
static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);

	list_splice_init(&port->resources, &sys->resources);

	return 1;
}
/**
 * xilinx_pcie_scan_bus - Scan PCIe bus for devices
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Return: Valid Bus pointer on success and NULL on failure
 */
static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);
	struct pci_bus *bus;

	port->root_busno = sys->busnr;
	bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
				sys, &sys->resources);

	return bus;
}
/**
 * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct resource *mem;
	resource_size_t offset;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct pci_host_bridge_window *win;
	int err = 0, mem_resno = 0;

	/* Get the ranges */
	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	/* Parse the ranges and add the resources found to the list */
	for_each_of_pci_range(&parser, &range) {
		if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
			dev_err(dev, "Maximum memory resources exceeded\n");
			return -EINVAL;
		}

		mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
		if (!mem) {
			err = -ENOMEM;
			goto free_resources;
		}

		of_pci_range_to_resource(&range, node, mem);

		switch (mem->flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_MEM:
			/*
			 * Reset any error left over from a previous,
			 * invalid range.
			 */
			err = 0;
			offset = range.cpu_addr - range.pci_addr;
			mem_resno++;
			break;
		default:
			err = -EINVAL;
			break;
		}

		if (err < 0) {
			dev_warn(dev, "Invalid resource found %pR\n", mem);
			continue;
		}

		err = request_resource(&iomem_resource, mem);
		if (err)
			goto free_resources;

		pci_add_resource_offset(&port->resources, mem, offset);
	}

	/* Get the bus range */
	if (of_pci_parse_bus_range(node, &port->bus_range)) {
		u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
		u8 last;

		last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
		       XILINX_PCIE_BIR_ECAM_SZ_SHIFT;

		port->bus_range = (struct resource) {
			.name	= node->name,
			.start	= 0,
			.end	= last,
			.flags	= IORESOURCE_BUS,
		};
	}

	/* Register bus resource */
	pci_add_resource(&port->resources, &port->bus_range);

	return 0;

free_resources:
	release_child_resources(&iomem_resource);
	list_for_each_entry(win, &port->resources, list)
		devm_kfree(dev, win->res);
	pci_free_resource_list(&port->resources);

	return err;
}
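pci_add_resource_offset() records the CPU-to-bus translation of each window as offset = cpu_addr - pci_addr, so the core can convert between BAR values (bus addresses) and CPU addresses. A worked example with illustrative numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* window: CPU 0x80000000 maps to PCI bus address 0x0 */
		uint64_t cpu_addr = 0x80000000ULL, pci_addr = 0x0ULL;
		uint64_t offset = cpu_addr - pci_addr;

		uint64_t bar = 0x00100000ULL;	/* BAR value, bus address */
		printf("CPU address = 0x%llx\n",
		       (unsigned long long)(bar + offset)); /* 0x80100000 */
		return 0;
	}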
/**
 * xilinx_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct resource regs;
	const char *type;
	int err;

	type = of_get_property(node, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	err = of_address_to_resource(node, 0, &regs);
	if (err) {
		dev_err(dev, "missing \"reg\" property\n");
		return err;
	}

	port->reg_base = devm_ioremap_resource(dev, &regs);
	if (IS_ERR(port->reg_base))
		return PTR_ERR(port->reg_base);

	port->irq = irq_of_parse_and_map(node, 0);
	err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
			       IRQF_SHARED, "xilinx-pcie", port);
	if (err) {
		dev_err(dev, "unable to request irq %d\n", port->irq);
		return err;
	}

	return 0;
}
/**
 * xilinx_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port;
	struct hw_pci hw;
	struct device *dev = &pdev->dev;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = dev;

	err = xilinx_pcie_parse_dt(port);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pcie_init_port(port);

	err = xilinx_pcie_init_irq_domain(port);
	if (err) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return err;
	}

	/*
	 * Parse PCI ranges, configuration bus range and
	 * request their resources
	 */
	INIT_LIST_HEAD(&port->resources);
	err = xilinx_pcie_parse_and_add_res(port);
	if (err) {
		dev_err(dev, "Failed adding resources\n");
		return err;
	}

	platform_set_drvdata(pdev, port);

	/*
	 * Register the device; the compound literal zero-fills the
	 * remaining hw_pci fields.
	 */
	hw = (struct hw_pci) {
		.nr_controllers	= 1,
		.private_data	= (void **)&port,
		.setup		= xilinx_pcie_setup,
		.map_irq	= of_irq_parse_and_map_pci,
		.add_bus	= xilinx_pcie_add_bus,
		.scan		= xilinx_pcie_scan_bus,
		.ops		= &xilinx_pcie_ops,
	};
	pci_common_init_dev(dev, &hw);

	return 0;
}
/**
 * xilinx_pcie_remove - Remove function
 * @pdev: Platform device pointer
 *
 * Return: '0' always
 */
static int xilinx_pcie_remove(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);

	xilinx_pcie_free_irq_domain(port);

	return 0;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
	{}
};

static struct platform_driver xilinx_pcie_driver = {
	.driver = {
		.name = "xilinx-pcie",
		.owner = THIS_MODULE,
		.of_match_table = xilinx_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pcie_probe,
	.remove = xilinx_pcie_remove,
};
module_platform_driver(xilinx_pcie_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
MODULE_LICENSE("GPL v2");
@@ -55,7 +55,6 @@ int pci_add_dynid(struct pci_driver *drv,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;
	int retval;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
@@ -73,9 +72,7 @@ int pci_add_dynid(struct pci_driver *drv,
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	retval = driver_attach(&drv->driver);

	return retval;
	return driver_attach(&drv->driver);
}
EXPORT_SYMBOL_GPL(pci_add_dynid);
@@ -1907,10 +1907,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);
@@ -93,77 +93,6 @@ static int pcie_port_resume_noirq(struct device *dev)
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
struct d3cold_info {
	bool no_d3cold;
	unsigned int d3cold_delay;
};

static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
{
	struct d3cold_info *info = data;

	info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
				   info->d3cold_delay);
	if (pdev->no_d3cold)
		info->no_d3cold = true;
	return 0;
}

static int pcie_port_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct d3cold_info d3cold_info = {
		.no_d3cold = false,
		.d3cold_delay = PCI_PM_D3_WAIT,
	};

	/*
	 * If any subordinate device disables D3cold, we should not put
	 * the port into D3cold. The D3cold delay of the port should be
	 * the max of that of all subordinate devices.
	 */
	pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
	pdev->no_d3cold = d3cold_info.no_d3cold;
	pdev->d3cold_delay = d3cold_info.d3cold_delay;
	return 0;
}

static int pcie_port_runtime_resume(struct device *dev)
{
	return 0;
}

static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
{
	bool *pme_poll = data;

	if (pdev->pme_poll)
		*pme_poll = true;
	return 0;
}

static int pcie_port_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool pme_poll = false;

	/*
	 * If any subordinate device needs PME polling, we should keep
	 * the port in D0, because we need the port in D0 to poll it.
	 */
	pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
	/* Delay for a short while to prevent too frequent suspend/resume */
	if (!pme_poll)
		pm_schedule_suspend(dev, 10);
	return -EBUSY;
}
#else
#define pcie_port_runtime_suspend	NULL
#define pcie_port_runtime_resume	NULL
#define pcie_port_runtime_idle		NULL
#endif

static const struct dev_pm_ops pcie_portdrv_pm_ops = {
	.suspend	= pcie_port_device_suspend,
	.resume		= pcie_port_device_resume,
@@ -172,9 +101,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
	.poweroff	= pcie_port_device_suspend,
	.restore	= pcie_port_device_resume,
	.resume_noirq	= pcie_port_resume_noirq,
	.runtime_suspend = pcie_port_runtime_suspend,
	.runtime_resume	= pcie_port_runtime_resume,
	.runtime_idle	= pcie_port_runtime_idle,
};

#define PCIE_PORTDRV_PM_OPS	(&pcie_portdrv_pm_ops)
@@ -7,6 +7,8 @@
#ifndef _AER_H_
#define _AER_H_

#include <linux/types.h>

#define AER_NONFATAL		0
#define AER_FATAL		1
#define AER_CORRECTABLE		2
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,

/* Wrappers for managed devices */
struct device;

extern int devm_request_resource(struct device *dev, struct resource *root,
				 struct resource *new);
extern void devm_release_resource(struct device *dev, struct resource *new);

#define devm_request_region(dev,start,n,name) \
	__devm_request_region(dev, &ioport_resource, (start), (n), (name))
#define devm_request_mem_region(dev,start,n,name) \
@@ -1248,6 +1248,76 @@ int release_mem_region_adjustable(struct resource *parent,
/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
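For context, a sketch of how a platform driver would consume the new helper from its probe path; the resource is released automatically by devres when the device is unbound (the names and addresses below are illustrative):

	#include <linux/device.h>
	#include <linux/ioport.h>

	static struct resource my_window = {
		.name	= "my-mmio-window",
		.start	= 0x80000000,
		.end	= 0x80000fff,
		.flags	= IORESOURCE_MEM,
	};

	static int my_probe(struct device *dev)
	{
		int err;

		err = devm_request_resource(dev, &iomem_resource, &my_window);
		if (err)
			return err;	/* conflict already logged */

		/* use the window; no explicit release needed on unbind */
		return 0;
	}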
static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;