pci-v4.21-changes

-----BEGIN PGP SIGNATURE-----

iQJIBAABCgAyFiEEgMe7l+5h9hnxdsnuWYigwDrT+vwFAlwtMCIUHGJoZWxnYWFz
QGdvb2dsZS5jb20ACgkQWYigwDrT+vwQUQ/+P5/VDpo4abjudGO2c7FU1bJOwvfN
cfV5dvDCw0kpx0Em5SmnpAD7Punllxxvb/04K75lqarGyx/Txqaw+lbIF+qSj6my
GsQ16Iy8T48x5hr+Pf6vTh1eE+NaAVZfOPDOt7CyTNAgwfzHeVNyfNvz7pfKTIIJ
Mk/jRE4kkeWo60jsY5p3sFo3OVOxBOsRdN+2sruaQuWFXrKHLyNDR+7Z9ZPxubFk
cCO/TYPhNXmmKhCAR4V/rGiqz9OL2wyFixGhGhmD3tnC9nAb/wTMzjARsyBopBPi
b/KkR2eLFEyXN0HJrwqxiURo4J3nveAYEuNXH5KjRBQZnoBCGSCIlqFhlrp9vdBk
B4KIdT8h/M6LsVGeVSEIxIEXCp67YE31kxraFrk4Vsggdh2TFQ0llh1sajj8IFJB
XekutedAOlTSOaM1/jvVPUJYg04X90bp3uXn3IU45XlQ8nBOG3immFVITRLkvd3w
ywH+SEdeZAhWl3RGy8SHhqdeCJ7nNQbcRaRJ5CoWJBDNJTBGF1X+zJD2Swi6H9vA
nWGNRlb3CPPIMPF127nADnOE7Cj2FlpAEIEu52HpcpIrhEdrGvLkGeQfgdWBjbyU
aHwC04bLWnvsA9SEFVnuMIBaFQmJ1RuaWAHdtscyyO2uoeCtN8Aa+BX6jXFbVZQN
9eFzpiv0kUiXlAQ=
=g1ia
-----END PGP SIGNATURE-----

Merge tag 'pci-v4.21-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - Remove unused lists from ASPM pcie_link_state (Frederick Lawler)
 - Fix Broadcom CNB20LE host bridge unintended sign extension (Colin Ian King)
 - Expand Kconfig "PF" acronyms (Randy Dunlap)
 - Update MAINTAINERS for arch/x86/kernel/early-quirks.c (Bjorn Helgaas)
 - Add missing include to drivers/pci.h (Alexandru Gagniuc)
 - Override Synopsys USB 3.x HAPS device class so dwc3-haps can claim it instead of xhci (Thinh Nguyen)
 - Clean up P2PDMA documentation (Randy Dunlap)
 - Allow runtime PM even if the driver doesn't supply callbacks (Jarkko Nikula)
 - Remove status check after submitting Switchtec MRPC Firmware Download commands to avoid Completion Timeouts (Kelvin Cao)
 - Set Switchtec coherent DMA mask to allow 64-bit DMA (Boris Glimcher)
 - Fix Switchtec SWITCHTEC_IOCTL_EVENT_IDX_ALL flag overwrite issue (Joey Zhang)
 - Enable write combining for Switchtec MRPC Input buffers (Kelvin Cao)
 - Add Switchtec MRPC DMA mode support (Wesley Sheng)
 - Skip VF scanning on powerpc, which does this in firmware (Sebastian Ott)
 - Add Amlogic Meson PCIe controller driver and DT bindings (Yue Wang)
 - Constify histb dw_pcie_host_ops structure (Julia Lawall)
 - Support multiple power domains for imx6 (Leonard Crestez)
 - Constify layerscape driver data (Stefan Agner)
 - Update imx6 Kconfig to allow imx6 PCIe in imx7 kernel (Trent Piepho)
 - Support armada8k GPIO reset (Baruch Siach)
 - Add suspend/resume support on imx6 (Leonard Crestez)
 - Don't hard-code DesignWare DBI/ATU offset (Stephen Warren)
 - Skip i.MX6 PHY setup on i.MX7D (Andrey Smirnov)
 - Remove Jianguo Sun from HiSilicon STB maintainers (Lorenzo Pieralisi)
 - Mask DesignWare interrupts instead of disabling them to avoid lost interrupts (Marc Zyngier)
 - Add locking when acking DesignWare interrupts (Marc Zyngier)
 - Ack DesignWare interrupts in the proper callbacks (Marc Zyngier)
 - Use devm resource parser in mediatek (Honghui Zhang)
 - Remove unused mediatek "num-lanes" DT property (Honghui Zhang)
 - Add UniPhier PCIe controller driver and DT bindings (Kunihiko Hayashi)
 - Enable MSI for imx6 downstream components (Richard Zhu)

* tag 'pci-v4.21-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (40 commits)
  PCI: imx: Enable MSI from downstream components
  s390/pci: skip VF scanning
  PCI/IOV: Add flag so platforms can skip VF scanning
  PCI/IOV: Factor out sriov_add_vfs()
  PCI: uniphier: Add UniPhier PCIe host controller support
  dt-bindings: PCI: Add UniPhier PCIe host controller description
  PCI: amlogic: Add the Amlogic Meson PCIe controller driver
  dt-bindings: PCI: meson: add DT bindings for Amlogic Meson PCIe controller
  arm64: dts: mt7622: Remove un-used property for PCIe
  arm: dts: mt7623: Remove un-used property for PCIe
  dt-bindings: PCI: MediaTek: Remove un-used property
  PCI: mediatek: Remove un-used variant in struct mtk_pcie_port
  MAINTAINERS: Remove Jianguo Sun from HiSilicon STB DWC entry
  PCI: dwc: Don't hard-code DBI/ATU offset
  PCI: imx: Add imx6sx suspend/resume support
  PCI: armada8k: Add support for gpio controlled reset signal
  PCI: dwc: Adjust Kconfig to allow IMX6 PCIe host on IMX7
  PCI: dwc: layerscape: Constify driver data
  PCI: imx: Add multi-pd support
  PCI: Override Synopsys USB 3.x HAPS device class
  ...
commit 926b02d3eb

Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt (new file, 70 lines)
Amlogic Meson AXG DWC PCIE SoC controller

Amlogic Meson PCIe host controller is based on the Synopsys DesignWare PCI core.
It shares common functions with the PCIe DesignWare core driver and
inherits common properties defined in
Documentation/devicetree/bindings/pci/designware-pcie.txt.

Additional properties are described here:

Required properties:
- compatible:
	should contain "amlogic,axg-pcie" to identify the core.
- reg:
	should contain the configuration address space.
- reg-names: Must be
	- "elbi"	External local bus interface registers
	- "cfg"		Meson specific registers
	- "phy"		Meson PCIE PHY registers
	- "config"	PCIe configuration space
- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal.
- clocks: Must contain an entry for each entry in clock-names.
- clock-names: Must include the following entries:
	- "pclk"	PCIe GEN 100M PLL clock
	- "port"	PCIe_x(A or B) RC clock gate
	- "general"	PCIe Phy clock
	- "mipi"	PCIe_x(A or B) 100M ref clock gate
- resets: phandle to the reset lines.
- reset-names: must contain "phy", "port" and "apb"
	- "phy"		Share PHY reset
	- "port"	Port A or B reset
	- "apb"		Share APB reset
- device_type:
	should be "pci". As specified in designware-pcie.txt


Example configuration:

	pcie: pcie@f9800000 {
		compatible = "amlogic,axg-pcie", "snps,dw-pcie";
		reg = <0x0 0xf9800000 0x0 0x400000
			0x0 0xff646000 0x0 0x2000
			0x0 0xff644000 0x0 0x2000
			0x0 0xf9f00000 0x0 0x100000>;
		reg-names = "elbi", "cfg", "phy", "config";
		reset-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>;
		interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
		#interrupt-cells = <1>;
		interrupt-map-mask = <0 0 0 0>;
		interrupt-map = <0 0 0 0 &gic GIC_SPI 179 IRQ_TYPE_EDGE_RISING>;
		bus-range = <0x0 0xff>;
		#address-cells = <3>;
		#size-cells = <2>;
		device_type = "pci";
		ranges = <0x82000000 0 0 0x0 0xf9c00000 0 0x00300000>;

		clocks = <&clkc CLKID_USB
			&clkc CLKID_MIPI_ENABLE
			&clkc CLKID_PCIE_A
			&clkc CLKID_PCIE_CML_EN0>;
		clock-names = "general",
			"mipi",
			"pclk",
			"port";
		resets = <&reset RESET_PCIE_PHY>,
			<&reset RESET_PCIE_A>,
			<&reset RESET_PCIE_APB>;
		reset-names = "phy",
			"port",
			"apb";
	};
@@ -41,7 +41,9 @@ Optional properties:
 Additional required properties for imx6sx-pcie:
 - clock names: Must include the following additional entries:
	- "pcie_inbound_axi"
-- power-domains: Must be set to a phandle pointing to the PCIE_PHY power domain
+- power-domains: Must be set to phandles pointing to the DISPLAY and
+  PCIE_PHY power domains
+- power-domain-names: Must be "pcie", "pcie_phy"
 
 Additional required properties for imx7d-pcie:
 - power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
@@ -65,7 +65,6 @@ Required properties:
   explanation.
 - ranges: Sub-ranges distributed from the PCIe controller node. An empty
   property is sufficient.
-- num-lanes: Number of lanes to use for this port.
 
 Examples for MT7623:
 
@@ -118,7 +117,6 @@ Examples for MT7623:
		interrupt-map-mask = <0 0 0 0>;
		interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
		ranges;
-		num-lanes = <1>;
	};
 
	pcie@1,0 {
@@ -129,7 +127,6 @@ Examples for MT7623:
		interrupt-map-mask = <0 0 0 0>;
		interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
		ranges;
-		num-lanes = <1>;
	};
 
	pcie@2,0 {
@@ -140,7 +137,6 @@ Examples for MT7623:
		interrupt-map-mask = <0 0 0 0>;
		interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
		ranges;
-		num-lanes = <1>;
	};
 };
 
@@ -172,7 +168,6 @@ Examples for MT2712:
		#size-cells = <2>;
		#interrupt-cells = <1>;
		ranges;
-		num-lanes = <1>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc0 0>,
				<0 0 0 2 &pcie_intc0 1>,
@@ -191,7 +186,6 @@ Examples for MT2712:
		#size-cells = <2>;
		#interrupt-cells = <1>;
		ranges;
-		num-lanes = <1>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc1 0>,
				<0 0 0 2 &pcie_intc1 1>,
@@ -245,7 +239,6 @@ Examples for MT7622:
		#size-cells = <2>;
		#interrupt-cells = <1>;
		ranges;
-		num-lanes = <1>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc0 0>,
				<0 0 0 2 &pcie_intc0 1>,
@@ -264,7 +257,6 @@ Examples for MT7622:
		#size-cells = <2>;
		#interrupt-cells = <1>;
		ranges;
-		num-lanes = <1>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc1 0>,
				<0 0 0 2 &pcie_intc1 1>,
Documentation/devicetree/bindings/pci/uniphier-pcie.txt (new file, 81 lines)

Socionext UniPhier PCIe host controller bindings

This describes the devicetree bindings for PCIe host controller implemented
on Socionext UniPhier SoCs.

UniPhier PCIe host controller is based on the Synopsys DesignWare PCI core.
It shares common functions with the PCIe DesignWare core driver and inherits
common properties defined in
Documentation/devicetree/bindings/pci/designware-pcie.txt.

Required properties:
- compatible: Should be "socionext,uniphier-pcie".
- reg: Specifies offset and length of the register set for the device.
	According to the reg-names, appropriate register sets are required.
- reg-names: Must include the following entries:
	"dbi"    - controller configuration registers
	"link"   - SoC-specific glue layer registers
	"config" - PCIe configuration space
- clocks: A phandle to the clock gate for PCIe glue layer including
	the host controller.
- resets: A phandle to the reset line for PCIe glue layer including
	the host controller.
- interrupts: A list of interrupt specifiers. According to the
	interrupt-names, appropriate interrupts are required.
- interrupt-names: Must include the following entries:
	"dma" - DMA interrupt
	"msi" - MSI interrupt

Optional properties:
- phys: A phandle to generic PCIe PHY. According to the phy-names, appropriate
	phys are required.
- phy-names: Must be "pcie-phy".

Required sub-node:
- legacy-interrupt-controller: Specifies interrupt controller for legacy PCI
	interrupts.

Required properties for legacy-interrupt-controller:
- interrupt-controller: identifies the node as an interrupt controller.
- #interrupt-cells: specifies the number of cells needed to encode an
	interrupt source. The value must be 1.
- interrupt-parent: Phandle to the parent interrupt controller.
- interrupts: An interrupt specifier for legacy interrupt.

Example:

	pcie: pcie@66000000 {
		compatible = "socionext,uniphier-pcie", "snps,dw-pcie";
		status = "disabled";
		reg-names = "dbi", "link", "config";
		reg = <0x66000000 0x1000>, <0x66010000 0x10000>,
		      <0x2fff0000 0x10000>;
		#address-cells = <3>;
		#size-cells = <2>;
		clocks = <&sys_clk 24>;
		resets = <&sys_rst 24>;
		num-lanes = <1>;
		num-viewport = <1>;
		bus-range = <0x0 0xff>;
		device_type = "pci";
		ranges =
		/* downstream I/O */
			<0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000
		/* non-prefetchable memory */
			 0x82000000 0 0x00000000 0x20000000 0 0x0ffe0000>;
		#interrupt-cells = <1>;
		interrupt-names = "dma", "msi";
		interrupts = <0 224 4>, <0 225 4>;
		interrupt-map-mask = <0 0 0 7>;
		interrupt-map = <0 0 0 1 &pcie_intc 0>,	/* INTA */
				<0 0 0 2 &pcie_intc 1>,	/* INTB */
				<0 0 0 3 &pcie_intc 2>,	/* INTC */
				<0 0 0 4 &pcie_intc 3>;	/* INTD */

		pcie_intc: legacy-interrupt-controller {
			interrupt-controller;
			#interrupt-cells = <1>;
			interrupt-parent = <&gic>;
			interrupts = <0 226 4>;
		};
	};
@@ -49,7 +49,7 @@ For example, in the NVMe Target Copy Offload implementation:
   in that it exposes any CMB (Controller Memory Buffer) as a P2P memory
   resource (provider), it accepts P2P memory pages as buffers in requests
   to be used directly (client) and it can also make use of the CMB as
-  submission queue entries (orchastrator).
+  submission queue entries (orchestrator).
 * The RDMA driver is a client in this arrangement so that an RNIC
   can DMA directly to the memory exposed by the NVMe device.
 * The NVMe Target driver (nvmet) can orchestrate the data from the RNIC
@@ -111,7 +111,7 @@ that's compatible with all clients using :c:func:`pci_p2pmem_find()`.
 If more than one provider is supported, the one nearest to all the clients will
 be chosen first. If more than one provider is an equal distance away, the
 one returned will be chosen at random (it is not an arbitrary but
-truely random). This function returns the PCI device to use for the provider
+truly random). This function returns the PCI device to use for the provider
 with a reference taken and therefore when it's no longer needed it should be
 returned with pci_dev_put().
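The reference-counting rule in the P2PDMA hunk above is easy to get wrong, so here is a minimal sketch of the pattern it describes. It assumes the pci_p2pmem_find()/pci_alloc_p2pmem() signatures of this kernel release and a hypothetical caller that has a single client device; it is illustrative, not code from this series.

	#include <linux/pci-p2pdma.h>
	#include <linux/sizes.h>

	static int example_use_p2pmem(struct device *client)
	{
		struct pci_dev *provider;
		void *buf;

		/* Returns the chosen provider with a reference taken */
		provider = pci_p2pmem_find(client);
		if (!provider)
			return -ENODEV;

		buf = pci_alloc_p2pmem(provider, SZ_4K);
		if (!buf) {
			pci_dev_put(provider);	/* drop the reference on error */
			return -ENOMEM;
		}

		/* ... hand "buf" to the client for peer-to-peer DMA ... */

		pci_free_p2pmem(provider, buf, SZ_4K);
		pci_dev_put(provider);		/* done with the provider */
		return 0;
	}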
MAINTAINERS

@@ -11750,6 +11750,7 @@ F: include/uapi/linux/pci*
 F:	lib/pci*
 F:	arch/x86/pci/
 F:	arch/x86/kernel/quirks.c
+F:	arch/x86/kernel/early-quirks.c
 
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@ -11759,6 +11760,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
 S:	Supported
 F:	drivers/pci/controller/
 
+PCIE DRIVER FOR AMLOGIC MESON
+M:	Yue Wang <yue.wang@Amlogic.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-amlogic@lists.infradead.org
+S:	Maintained
+F:	drivers/pci/controller/dwc/pci-meson.c
+
 PCIE DRIVER FOR AXIS ARTPEC
 M:	Jesper Nilsson <jesper.nilsson@axis.com>
 L:	linux-arm-kernel@axis.com
@@ -11791,7 +11799,6 @@ F: Documentation/devicetree/bindings/pci/kirin-pcie.txt
 F:	drivers/pci/controller/dwc/pcie-kirin.c
 
 PCIE DRIVER FOR HISILICON STB
-M:	Jianguo Sun <sunjianguo1@huawei.com>
 M:	Shawn Guo <shawn.guo@linaro.org>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
@@ -11828,6 +11835,13 @@ S: Maintained
 F:	Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
 F:	drivers/pci/controller/pci-v3-semi.c
 
+PCIE DRIVER FOR SOCIONEXT UNIPHIER
+M:	Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L:	linux-pci@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/uniphier-pcie.txt
+F:	drivers/pci/controller/dwc/pcie-uniphier.c
+
 PCIE DRIVER FOR ST SPEAR13XX
 M:	Pratyush Anand <pratyush.anand@gmail.com>
 L:	linux-pci@vger.kernel.org
@@ -850,7 +850,6 @@
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
			ranges;
-			num-lanes = <1>;
			status = "disabled";
		};
 
@@ -862,7 +861,6 @@
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
			ranges;
-			num-lanes = <1>;
			status = "disabled";
		};
 
@@ -874,7 +872,6 @@
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
			ranges;
-			num-lanes = <1>;
			status = "disabled";
		};
	};
@@ -802,7 +802,6 @@
			ranges;
			status = "disabled";
 
-			num-lanes = <1>;
			interrupt-map-mask = <0 0 0 7>;
			interrupt-map = <0 0 0 1 &pcie_intc0 0>,
					<0 0 0 2 &pcie_intc0 1>,
@@ -823,7 +822,6 @@
			ranges;
			status = "disabled";
 
-			num-lanes = <1>;
			interrupt-map-mask = <0 0 0 7>;
			interrupt-map = <0 0 0 1 &pcie_intc1 0>,
					<0 0 0 2 &pcie_intc1 1>,
@@ -649,6 +649,9 @@ int pcibios_add_device(struct pci_dev *pdev)
	struct resource *res;
	int i;
 
+	if (pdev->is_physfn)
+		pdev->no_vf_scan = 1;
+
	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
	word1 = read_pci_config_16(bus, slot, func, 0xc0);
	word2 = read_pci_config_16(bus, slot, func, 0xc2);
	if (word1 != word2) {
-		res.start = (word1 << 16) | 0x0000;
-		res.end = (word2 << 16) | 0xffff;
+		res.start = ((resource_size_t) word1 << 16) | 0x0000;
+		res.end = ((resource_size_t) word2 << 16) | 0xffff;
		res.flags = IORESOURCE_MEM;
		update_res(info, res.start, res.end, res.flags, 0);
	}
@@ -103,9 +103,9 @@ config PCI_PF_STUB
	depends on PCI_IOV
	help
	  Say Y or M here if you want to enable support for devices that
-	  require SR-IOV support, while at the same time the PF itself is
-	  not providing any actual services on the host itself such as
-	  storage or networking.
+	  require SR-IOV support, while at the same time the PF (Physical
+	  Function) itself is not providing any actual services on the
+	  host itself such as storage or networking.
 
	  When in doubt, say N.
@@ -89,8 +89,8 @@ config PCI_EXYNOS
	select PCIE_DW_HOST
 
 config PCI_IMX6
-	bool "Freescale i.MX6 PCIe controller"
-	depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
+	bool "Freescale i.MX6/7 PCIe controller"
+	depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
	depends on PCI_MSI_IRQ_DOMAIN
	select PCIE_DW_HOST
 
@@ -193,4 +193,24 @@ config PCIE_HISI_STB
	help
	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs
 
+config PCI_MESON
+	bool "MESON PCIe controller"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCI controller support on Amlogic
+	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
+	  implement the driver.
+
+config PCIE_UNIPHIER
+	bool "Socionext UniPhier PCIe controllers"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on OF && HAS_IOMEM
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on UniPhier SoCs.
+	  This driver supports LD20 and PXs3 SoCs.
+
 endmenu
@@ -14,6 +14,8 @@ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
 obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
 obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
 
 # The following drivers are for devices that use the generic ACPI
 # pci_root.c driver but don't support standard ECAM config access.
@@ -27,6 +27,8 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/reset.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 #include "pcie-designware.h"
 
@@ -59,6 +61,11 @@ struct imx6_pcie {
	u32			tx_swing_low;
	int			link_gen;
	struct regulator	*vpcie;
+
+	/* power domain for pcie */
+	struct device		*pd_pcie;
+	/* power domain for pcie phy */
+	struct device		*pd_pcie_phy;
 };
 
 /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -67,6 +74,7 @@ struct imx6_pcie {
 #define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
 
 /* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP			0x50
 #define PCIE_RC_LCR				0x7c
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
@@ -290,6 +298,43 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
	return 1;
 }
 
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct device_link *link;
+
+	/* Do nothing when in a single power domain */
+	if (dev->pm_domain)
+		return 0;
+
+	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+	if (IS_ERR(imx6_pcie->pd_pcie))
+		return PTR_ERR(imx6_pcie->pd_pcie);
+	link = device_link_add(dev, imx6_pcie->pd_pcie,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie pd.\n");
+		return -EINVAL;
+	}
+
+	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+	if (IS_ERR(imx6_pcie->pd_pcie_phy))
+		return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+	device_link_add(dev, imx6_pcie->pd_pcie_phy,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (IS_ERR(link)) {
+		dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
+		return PTR_ERR(link);
+	}
+
+	return 0;
+}
+
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
	struct device *dev = imx6_pcie->pci->dev;
@@ -765,8 +810,28 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
 
 static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
 {
-	reset_control_assert(imx6_pcie->turnoff_reset);
-	reset_control_deassert(imx6_pcie->turnoff_reset);
+	struct device *dev = imx6_pcie->pci->dev;
+
+	/* Some variants have a turnoff reset in DT */
+	if (imx6_pcie->turnoff_reset) {
+		reset_control_assert(imx6_pcie->turnoff_reset);
+		reset_control_deassert(imx6_pcie->turnoff_reset);
+		goto pm_turnoff_sleep;
+	}
+
+	/* Others poke directly at IOMUXC registers */
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+		break;
+	default:
+		dev_err(dev, "PME_Turn_Off not implemented\n");
+		return;
+	}
 
	/*
	 * Components with an upstream port must respond to
@@ -775,6 +840,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
	 * The standard recommends a 1-10ms timeout after which to
	 * proceed anyway as if acks were received.
	 */
+pm_turnoff_sleep:
	usleep_range(1000, 10000);
 }
 
@@ -784,18 +850,31 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
	clk_disable_unprepare(imx6_pcie->pcie_phy);
	clk_disable_unprepare(imx6_pcie->pcie_bus);
 
-	if (imx6_pcie->variant == IMX7D) {
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
+		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+		break;
+	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+		break;
+	default:
+		break;
	}
 }
 
+static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
+{
+	return (imx6_pcie->variant == IMX7D ||
+		imx6_pcie->variant == IMX6SX);
+}
+
 static int imx6_pcie_suspend_noirq(struct device *dev)
 {
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
 
-	if (imx6_pcie->variant != IMX7D)
+	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;
 
	imx6_pcie_pm_turnoff(imx6_pcie);
@@ -811,7 +890,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
	struct pcie_port *pp = &imx6_pcie->pci->pp;
 
-	if (imx6_pcie->variant != IMX7D)
+	if (!imx6_pcie_supports_suspend(imx6_pcie))
		return 0;
 
	imx6_pcie_assert_core_reset(imx6_pcie);
@@ -840,6 +919,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;
+	u16 val;
 
	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
@@ -977,10 +1057,22 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 
	platform_set_drvdata(pdev, imx6_pcie);
 
+	ret = imx6_pcie_attach_pd(dev);
+	if (ret)
+		return ret;
+
	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;
 
+	if (pci_msi_enabled()) {
+		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+					PCI_MSI_FLAGS);
+		val |= PCI_MSI_FLAGS_ENABLE;
+		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+				   val);
+	}
+
	return 0;
 }
@@ -222,12 +222,12 @@ static const struct dw_pcie_ops dw_ls_pcie_ops = {
	.link_up = ls_pcie_link_up,
 };
 
-static struct ls_pcie_drvdata ls1021_drvdata = {
+static const struct ls_pcie_drvdata ls1021_drvdata = {
	.ops = &ls1021_pcie_host_ops,
	.dw_pcie_ops = &dw_ls1021_pcie_ops,
 };
 
-static struct ls_pcie_drvdata ls1043_drvdata = {
+static const struct ls_pcie_drvdata ls1043_drvdata = {
	.lut_offset = 0x10000,
	.ltssm_shift = 24,
	.lut_dbg = 0x7fc,
@@ -235,7 +235,7 @@ static struct ls_pcie_drvdata ls1043_drvdata = {
	.dw_pcie_ops = &dw_ls_pcie_ops,
 };
 
-static struct ls_pcie_drvdata ls1046_drvdata = {
+static const struct ls_pcie_drvdata ls1046_drvdata = {
	.lut_offset = 0x80000,
	.ltssm_shift = 24,
	.lut_dbg = 0x407fc,
@@ -243,7 +243,7 @@ static struct ls_pcie_drvdata ls1046_drvdata = {
	.dw_pcie_ops = &dw_ls_pcie_ops,
 };
 
-static struct ls_pcie_drvdata ls2080_drvdata = {
+static const struct ls_pcie_drvdata ls2080_drvdata = {
	.lut_offset = 0x80000,
	.ltssm_shift = 0,
	.lut_dbg = 0x7fc,
@@ -251,7 +251,7 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
	.dw_pcie_ops = &dw_ls_pcie_ops,
 };
 
-static struct ls_pcie_drvdata ls2088_drvdata = {
+static const struct ls_pcie_drvdata ls2088_drvdata = {
	.lut_offset = 0x80000,
	.ltssm_shift = 0,
	.lut_dbg = 0x407fc,
drivers/pci/controller/dwc/pci-meson.c (new file, 592 lines)

// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Amlogic MESON SoCs
 *
 * Copyright (c) 2018 Amlogic, inc.
 * Author: Yue Wang <yue.wang@amlogic.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define to_meson_pcie(x) dev_get_drvdata((x)->dev)

/* External local bus interface registers */
#define PLR_OFFSET			0x700
#define PCIE_PORT_LINK_CTRL_OFF		(PLR_OFFSET + 0x10)
#define FAST_LINK_MODE			BIT(7)
#define LINK_CAPABLE_MASK		GENMASK(21, 16)
#define LINK_CAPABLE_X1			BIT(16)

#define PCIE_GEN2_CTRL_OFF		(PLR_OFFSET + 0x10c)
#define NUM_OF_LANES_MASK		GENMASK(12, 8)
#define NUM_OF_LANES_X1			BIT(8)
#define DIRECT_SPEED_CHANGE		BIT(17)

#define TYPE1_HDR_OFFSET		0x0
#define PCIE_STATUS_COMMAND		(TYPE1_HDR_OFFSET + 0x04)
#define PCI_IO_EN			BIT(0)
#define PCI_MEM_SPACE_EN		BIT(1)
#define PCI_BUS_MASTER_EN		BIT(2)

#define PCIE_BASE_ADDR0			(TYPE1_HDR_OFFSET + 0x10)
#define PCIE_BASE_ADDR1			(TYPE1_HDR_OFFSET + 0x14)

#define PCIE_CAP_OFFSET			0x70
#define PCIE_DEV_CTRL_DEV_STUS		(PCIE_CAP_OFFSET + 0x08)
#define PCIE_CAP_MAX_PAYLOAD_MASK	GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x)	((x) << 5)
#define PCIE_CAP_MAX_READ_REQ_MASK	GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x)	((x) << 12)

/* PCIe specific config registers */
#define PCIE_CFG0			0x0
#define APP_LTSSM_ENABLE		BIT(7)

#define PCIE_CFG_STATUS12		0x30
#define IS_SMLH_LINK_UP(x)		((x) & (1 << 6))
#define IS_RDLH_LINK_UP(x)		((x) & (1 << 16))
#define IS_LTSSM_UP(x)			((((x) >> 10) & 0x1f) == 0x11)

#define PCIE_CFG_STATUS17		0x44
#define PM_CURRENT_STATE(x)		(((x) >> 7) & 0x1)

#define WAIT_LINKUP_TIMEOUT		4000
#define PORT_CLK_RATE			100000000UL
#define MAX_PAYLOAD_SIZE		256
#define MAX_READ_REQ_SIZE		256
#define MESON_PCIE_PHY_POWERUP		0x1c
#define PCIE_RESET_DELAY		500
#define PCIE_SHARED_RESET		1
#define PCIE_NORMAL_RESET		0

enum pcie_data_rate {
	PCIE_GEN1,
	PCIE_GEN2,
	PCIE_GEN3,
	PCIE_GEN4
};

struct meson_pcie_mem_res {
	void __iomem *elbi_base;
	void __iomem *cfg_base;
	void __iomem *phy_base;
};

struct meson_pcie_clk_res {
	struct clk *clk;
	struct clk *mipi_gate;
	struct clk *port_clk;
	struct clk *general_clk;
};

struct meson_pcie_rc_reset {
	struct reset_control *phy;
	struct reset_control *port;
	struct reset_control *apb;
};

struct meson_pcie {
	struct dw_pcie pci;
	struct meson_pcie_mem_res mem_res;
	struct meson_pcie_clk_res clk_res;
	struct meson_pcie_rc_reset mrst;
	struct gpio_desc *reset_gpio;
};

static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
						  const char *id,
						  u32 reset_type)
{
	struct device *dev = mp->pci.dev;
	struct reset_control *reset;

	if (reset_type == PCIE_SHARED_RESET)
		reset = devm_reset_control_get_shared(dev, id);
	else
		reset = devm_reset_control_get(dev, id);

	return reset;
}

static int meson_pcie_get_resets(struct meson_pcie *mp)
{
	struct meson_pcie_rc_reset *mrst = &mp->mrst;

	mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
	if (IS_ERR(mrst->phy))
		return PTR_ERR(mrst->phy);
	reset_control_deassert(mrst->phy);

	mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
	if (IS_ERR(mrst->port))
		return PTR_ERR(mrst->port);
	reset_control_deassert(mrst->port);

	mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
	if (IS_ERR(mrst->apb))
		return PTR_ERR(mrst->apb);
	reset_control_deassert(mrst->apb);

	return 0;
}

static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
					struct meson_pcie *mp,
					const char *id)
{
	struct device *dev = mp->pci.dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);

	return devm_ioremap_resource(dev, res);
}

static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
					       struct meson_pcie *mp,
					       const char *id)
{
	struct device *dev = mp->pci.dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
	if (!res) {
		dev_err(dev, "No REG resource %s\n", id);
		return ERR_PTR(-ENXIO);
	}

	return devm_ioremap(dev, res->start, resource_size(res));
}

static int meson_pcie_get_mems(struct platform_device *pdev,
			       struct meson_pcie *mp)
{
	mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
	if (IS_ERR(mp->mem_res.elbi_base))
		return PTR_ERR(mp->mem_res.elbi_base);

	mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
	if (IS_ERR(mp->mem_res.cfg_base))
		return PTR_ERR(mp->mem_res.cfg_base);

	/* The Meson SoC has two PCI controllers that share the same PHY registers */
	mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
	if (IS_ERR(mp->mem_res.phy_base))
		return PTR_ERR(mp->mem_res.phy_base);

	return 0;
}

static void meson_pcie_power_on(struct meson_pcie *mp)
{
	writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
}

static void meson_pcie_reset(struct meson_pcie *mp)
{
	struct meson_pcie_rc_reset *mrst = &mp->mrst;

	reset_control_assert(mrst->phy);
	udelay(PCIE_RESET_DELAY);
	reset_control_deassert(mrst->phy);
	udelay(PCIE_RESET_DELAY);

	reset_control_assert(mrst->port);
	reset_control_assert(mrst->apb);
	udelay(PCIE_RESET_DELAY);
	reset_control_deassert(mrst->port);
	reset_control_deassert(mrst->apb);
	udelay(PCIE_RESET_DELAY);
}

static inline struct clk *meson_pcie_probe_clock(struct device *dev,
						 const char *id, u64 rate)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, id);
	if (IS_ERR(clk))
		return clk;

	if (rate) {
		ret = clk_set_rate(clk, rate);
		if (ret) {
			dev_err(dev, "set clk rate failed, ret = %d\n", ret);
			return ERR_PTR(ret);
		}
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "couldn't enable clk\n");
		return ERR_PTR(ret);
	}

	devm_add_action_or_reset(dev,
				 (void (*) (void *))clk_disable_unprepare,
				 clk);

	return clk;
}

static int meson_pcie_probe_clocks(struct meson_pcie *mp)
{
	struct device *dev = mp->pci.dev;
	struct meson_pcie_clk_res *res = &mp->clk_res;

	res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
	if (IS_ERR(res->port_clk))
		return PTR_ERR(res->port_clk);

	res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
	if (IS_ERR(res->mipi_gate))
		return PTR_ERR(res->mipi_gate);

	res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
	if (IS_ERR(res->general_clk))
		return PTR_ERR(res->general_clk);

	res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
	if (IS_ERR(res->clk))
		return PTR_ERR(res->clk);

	return 0;
}

static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
	writel(val, mp->mem_res.elbi_base + reg);
}

static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
{
	return readl(mp->mem_res.elbi_base + reg);
}

static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
	return readl(mp->mem_res.cfg_base + reg);
}

static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
	writel(val, mp->mem_res.cfg_base + reg);
}

static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
	gpiod_set_value_cansleep(mp->reset_gpio, 0);
	udelay(500);
	gpiod_set_value_cansleep(mp->reset_gpio, 1);
}

static void meson_pcie_init_dw(struct meson_pcie *mp)
{
	u32 val;

	val = meson_cfg_readl(mp, PCIE_CFG0);
	val |= APP_LTSSM_ENABLE;
	meson_cfg_writel(mp, val, PCIE_CFG0);

	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
	val &= ~LINK_CAPABLE_MASK;
	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);

	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
	val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);

	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
	val &= ~NUM_OF_LANES_MASK;
	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);

	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
	val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);

	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}

static int meson_size_to_payload(struct meson_pcie *mp, int size)
{
	struct device *dev = mp->pci.dev;

	/*
	 * The DWC core supports payload sizes of 2^(val + 7) bytes, where
	 * val is 0..5 and defaults to 1. If the requested size is not a
	 * power of two, or is smaller than 2^7 or bigger than 2^12, fall
	 * back to the default size of 2^(1 + 7) bytes.
	 */
	if (!is_power_of_2(size) || size < 128 || size > 4096) {
		dev_warn(dev, "payload size %d, set to default 256\n", size);
		return 1;
	}

	return fls(size) - 8;
}

static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
	u32 val;
	int max_payload_size = meson_size_to_payload(mp, size);

	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
	val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);

	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
}

static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
	u32 val;
	int max_rd_req_size = meson_size_to_payload(mp, size);

	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
	val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);

	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
	val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
}

static inline void meson_enable_memory_space(struct meson_pcie *mp)
{
	/* Set the RC Bus Master, Memory Space and I/O Space enables */
	meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
			 PCIE_STATUS_COMMAND);
}

static int meson_pcie_establish_link(struct meson_pcie *mp)
{
	struct dw_pcie *pci = &mp->pci;
	struct pcie_port *pp = &pci->pp;

	meson_pcie_init_dw(mp);
	meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);

	dw_pcie_setup_rc(pp);
	meson_enable_memory_space(mp);

	meson_pcie_assert_reset(mp);

	return dw_pcie_wait_for_link(pci);
}

static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
{
	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(&mp->pci.pp);
}

static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				  u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = dw_pcie_read(pci->dbi_base + where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/*
	 * There is a bug in the MESON AXG PCIe controller whereby software
	 * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
	 * the return value in the config accessors.
	 */
	if (where == PCI_CLASS_REVISION && size == 4)
		*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
	else if (where == PCI_CLASS_DEVICE && size == 2)
		*val = PCI_CLASS_BRIDGE_PCI;
	else if (where == PCI_CLASS_DEVICE && size == 1)
		*val = PCI_CLASS_BRIDGE_PCI & 0xff;
	else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
		*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;

	return PCIBIOS_SUCCESSFUL;
}

static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
				  int size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static int meson_pcie_link_up(struct dw_pcie *pci)
{
	struct meson_pcie *mp = to_meson_pcie(pci);
	struct device *dev = pci->dev;
	u32 speed_okay = 0;
	u32 cnt = 0;
	u32 state12, state17, smlh_up, ltssm_up, rdlh_up;

	do {
		state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
		state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
		smlh_up = IS_SMLH_LINK_UP(state12);
		rdlh_up = IS_RDLH_LINK_UP(state12);
		ltssm_up = IS_LTSSM_UP(state12);

		if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
			speed_okay = 1;

		if (smlh_up)
			dev_dbg(dev, "smlh_link_up is on\n");
		if (rdlh_up)
			dev_dbg(dev, "rdlh_link_up is on\n");
		if (ltssm_up)
			dev_dbg(dev, "ltssm_up is on\n");
		if (speed_okay)
			dev_dbg(dev, "speed_okay\n");

		if (smlh_up && rdlh_up && ltssm_up && speed_okay)
			return 1;

		cnt++;

		udelay(10);
	} while (cnt < WAIT_LINKUP_TIMEOUT);

	dev_err(dev, "error: wait linkup timeout\n");
	return 0;
}

static int meson_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct meson_pcie *mp = to_meson_pcie(pci);
	int ret;

	ret = meson_pcie_establish_link(mp);
	if (ret)
		return ret;

	meson_pcie_enable_interrupts(mp);

	return 0;
}

static const struct dw_pcie_host_ops meson_pcie_host_ops = {
	.rd_own_conf = meson_pcie_rd_own_conf,
	.wr_own_conf = meson_pcie_wr_own_conf,
	.host_init = meson_pcie_host_init,
};

static int meson_add_pcie_port(struct meson_pcie *mp,
			       struct platform_device *pdev)
{
	struct dw_pcie *pci = &mp->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (pp->msi_irq < 0) {
			dev_err(dev, "failed to get MSI IRQ\n");
			return pp->msi_irq;
		}
	}

	pp->ops = &meson_pcie_host_ops;
	pci->dbi_base = mp->mem_res.elbi_base;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = meson_pcie_link_up,
};

static int meson_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct meson_pcie *mp;
	int ret;

	mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
	if (!mp)
		return -ENOMEM;

	pci = &mp->pci;
	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(mp->reset_gpio)) {
		dev_err(dev, "get reset gpio failed\n");
		return PTR_ERR(mp->reset_gpio);
	}

	ret = meson_pcie_get_resets(mp);
	if (ret) {
		dev_err(dev, "get reset resource failed, %d\n", ret);
		return ret;
	}

	ret = meson_pcie_get_mems(pdev, mp);
	if (ret) {
		dev_err(dev, "get memory resource failed, %d\n", ret);
		return ret;
	}

	meson_pcie_power_on(mp);
	meson_pcie_reset(mp);

	ret = meson_pcie_probe_clocks(mp);
	if (ret) {
		dev_err(dev, "init clock resources failed, %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, mp);

	ret = meson_add_pcie_port(mp, pdev);
	if (ret < 0) {
		dev_err(dev, "Add PCIe port failed, %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct of_device_id meson_pcie_of_match[] = {
	{
		.compatible = "amlogic,axg-pcie",
	},
	{},
};

static struct platform_driver meson_pcie_driver = {
	.probe = meson_pcie_probe,
	.driver = {
		.name = "meson-pcie",
		.of_match_table = meson_pcie_of_match,
	},
};

builtin_platform_driver(meson_pcie_driver);
@@ -22,6 +22,7 @@
 #include <linux/resource.h>
 #include <linux/of_pci.h>
 #include <linux/of_irq.h>
+#include <linux/gpio/consumer.h>
 
 #include "pcie-designware.h"
 
@@ -29,6 +30,7 @@ struct armada8k_pcie {
	struct dw_pcie *pci;
	struct clk *clk;
	struct clk *clk_reg;
+	struct gpio_desc *reset_gpio;
 };
 
 #define PCIE_VENDOR_REGS_OFFSET		0x8000
@@ -137,6 +139,12 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
 
+	if (pcie->reset_gpio) {
+		/* assert and then deassert the reset signal */
+		gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+		msleep(100);
+		gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+	}
	dw_pcie_setup_rc(pp);
	armada8k_pcie_establish_link(pcie);
 
@@ -249,6 +257,14 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
		goto fail_clkreg;
	}
 
+	/* Get reset gpio signal and hold asserted (logically high) */
+	pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(pcie->reset_gpio)) {
+		ret = PTR_ERR(pcie->reset_gpio);
+		goto fail_clkreg;
+	}
+
	platform_set_drvdata(pdev, pcie);
 
	ret = armada8k_add_pcie_port(pcie, pdev);
@@ -503,6 +503,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
		return -EINVAL;
	}
+	if (pci->iatu_unroll_enabled && !pci->atu_base) {
+		dev_err(dev, "atu_base is not populated\n");
+		return -EINVAL;
+	}
 
	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
	if (ret < 0) {
@@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
					    (i * MAX_MSI_IRQS_PER_CTRL) +
					    pos);
			generic_handle_irq(irq);
-			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-						(i * MSI_REG_CTRL_BLOCK_SIZE),
-					    4, 1 << pos);
			pos++;
		}
	}
@@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data)
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
		pp->irq_status[ctrl] &= ~(1 << bit);
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
	}
 
	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
		pp->irq_status[ctrl] |= 1 << bit;
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
	}
 
	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
 
 static void dw_pci_bottom_ack(struct irq_data *d)
 {
-	struct msi_desc *msi = irq_data_get_msi_desc(d);
-	struct pcie_port *pp;
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
 
-	pp = msi_desc_to_pci_sysdata(msi);
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
 
	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
 
	/* Initialize IRQ Status array */
-	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, &pp->irq_status[ctrl]);
+				    4, ~0);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, ~0);
+		pp->irq_status[ctrl] = 0;
+	}
 
	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
@@ -699,6 +710,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");
 
+		if (pci->iatu_unroll_enabled && !pci->atu_base)
+			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
@@ -93,7 +93,7 @@ static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 {
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	return dw_pcie_readl_dbi(pci, offset + reg);
+	return dw_pcie_readl_atu(pci, offset + reg);
 }
 
 static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -101,7 +101,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 {
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
-	dw_pcie_writel_dbi(pci, offset + reg, val);
+	dw_pcie_writel_atu(pci, offset + reg, val);
 }
 
 static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
@@ -187,7 +187,7 @@ static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 {
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
 
-	return dw_pcie_readl_dbi(pci, offset + reg);
+	return dw_pcie_readl_atu(pci, offset + reg);
 }
 
 static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -195,7 +195,7 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 {
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
 
-	dw_pcie_writel_dbi(pci, offset + reg, val);
+	dw_pcie_writel_atu(pci, offset + reg, val);
 }
 
 static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
@@ -92,12 +92,20 @@
 #define PCIE_ATU_UNR_LOWER_TARGET	0x14
 #define PCIE_ATU_UNR_UPPER_TARGET	0x18
 
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)	\
-			((0x3 << 20) | ((region) << 9))
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset, if atu_base not set.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
 
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region)		\
-			((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
+		((region) << 9)
+
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+		(((region) << 9) | (0x1 << 8))
 
 #define MAX_MSI_IRQS			256
 #define MAX_MSI_IRQS_PER_CTRL		32
@@ -219,6 +227,8 @@ struct dw_pcie {
	struct device		*dev;
	void __iomem		*dbi_base;
	void __iomem		*dbi_base2;
+	/* Used when iatu_unroll_enabled is true */
+	void __iomem		*atu_base;
	u32			num_viewport;
	u8			iatu_unroll_enabled;
	struct pcie_port	pp;
@@ -289,6 +299,16 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
	return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
 }
 
+static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	__dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+{
+	return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+}
+
 static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
 {
	u32 reg;
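The DEFAULT_DBI_ATU_OFFSET comment above means a DesignWare glue driver only has to populate atu_base when its unrolled iATU registers live somewhere other than dbi_base + 0x300000. A hedged sketch of that case follows; "my_pcie" and the "atu" resource name are hypothetical, and the real drivers in this series simply leave atu_base unset so the core derives it in dw_pcie_setup_rc().

	/* Illustrative only: override atu_base for a non-default layout */
	static int my_pcie_map_atu(struct platform_device *pdev,
				   struct dw_pcie *pci)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
		if (res) {
			/* SoC exposes the iATU in its own register window */
			pci->atu_base = devm_ioremap_resource(&pdev->dev, res);
			if (IS_ERR(pci->atu_base))
				return PTR_ERR(pci->atu_base);
		}
		/* else: leave atu_base NULL; the core will use
		 * dbi_base + DEFAULT_DBI_ATU_OFFSET
		 */
		return 0;
	}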
@@ -202,7 +202,7 @@ static int histb_pcie_host_init(struct pcie_port *pp)
	return 0;
 }
 
-static struct dw_pcie_host_ops histb_pcie_host_ops = {
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
	.rd_own_conf = histb_pcie_rd_own_conf,
	.wr_own_conf = histb_pcie_wr_own_conf,
	.host_init = histb_pcie_host_init,
471
drivers/pci/controller/dwc/pcie-uniphier.c
Normal file
471
drivers/pci/controller/dwc/pcie-uniphier.c
Normal file
@ -0,0 +1,471 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for UniPhier SoCs
 * Copyright 2018 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "pcie-designware.h"

#define PCL_PINCTRL0 0x002c
#define PCL_PERST_PLDN_REGEN BIT(12)
#define PCL_PERST_NOE_REGEN BIT(11)
#define PCL_PERST_OUT_REGEN BIT(8)
#define PCL_PERST_PLDN_REGVAL BIT(4)
#define PCL_PERST_NOE_REGVAL BIT(3)
#define PCL_PERST_OUT_REGVAL BIT(0)

#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)

#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)

#define PCL_APP_PM0 0x8078
#define PCL_SYS_AUX_PWR_DET BIT(8)

#define PCL_RCV_INT 0x8108
#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17)
#define PCL_CFG_BW_MGT_STATUS BIT(4)
#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3)
#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2)
#define PCL_CFG_PME_MSI_STATUS BIT(1)

#define PCL_RCV_INTX 0x810c
#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16)
#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8)
#define PCL_RCV_INTX_MASK_SHIFT 8
#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0)
#define PCL_RCV_INTX_STATUS_SHIFT 0

#define PCL_STATUS_LINK 0x8140
#define PCL_RDLH_LINK_UP BIT(1)
#define PCL_XMLH_LINK_UP BIT(0)

struct uniphier_pcie_priv {
	void __iomem *base;
	struct dw_pcie pci;
	struct clk *clk;
	struct reset_control *rst;
	struct phy *phy;
	struct irq_domain *legacy_irq_domain;
};

#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)

static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
				       bool enable)
{
	u32 val;

	val = readl(priv->base + PCL_APP_READY_CTRL);
	if (enable)
		val |= PCL_APP_LTSSM_ENABLE;
	else
		val &= ~PCL_APP_LTSSM_ENABLE;
	writel(val, priv->base + PCL_APP_READY_CTRL);
}

static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
{
	u32 val;

	/* use auxiliary power detection */
	val = readl(priv->base + PCL_APP_PM0);
	val |= PCL_SYS_AUX_PWR_DET;
	writel(val, priv->base + PCL_APP_PM0);

	/* assert PERST# */
	val = readl(priv->base + PCL_PINCTRL0);
	val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
		 | PCL_PERST_PLDN_REGVAL);
	val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
		| PCL_PERST_PLDN_REGEN;
	writel(val, priv->base + PCL_PINCTRL0);

	uniphier_pcie_ltssm_enable(priv, false);

	usleep_range(100000, 200000);

	/* deassert PERST# */
	val = readl(priv->base + PCL_PINCTRL0);
	val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
	writel(val, priv->base + PCL_PINCTRL0);
}

static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
{
	u32 status;
	int ret;

	/* wait PIPE clock */
	ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
				 status & PCL_PCLK_ALIVE, 100000, 1000000);
	if (ret) {
		dev_err(priv->pci.dev,
			"Failed to initialize controller in RC mode\n");
		return ret;
	}

	return 0;
}

static int uniphier_pcie_link_up(struct dw_pcie *pci)
{
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	u32 val, mask;

	val = readl(priv->base + PCL_STATUS_LINK);
	mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;

	return (val & mask) == mask;
}

static int uniphier_pcie_establish_link(struct dw_pcie *pci)
{
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);

	if (dw_pcie_link_up(pci))
		return 0;

	uniphier_pcie_ltssm_enable(priv, true);

	return dw_pcie_wait_for_link(pci);
}

static void uniphier_pcie_stop_link(struct dw_pcie *pci)
{
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);

	uniphier_pcie_ltssm_enable(priv, false);
}

static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
{
	writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
	writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
{
	writel(0, priv->base + PCL_RCV_INT);
	writel(0, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	u32 val;

	val = readl(priv->base + PCL_RCV_INTX);
	val &= ~PCL_RCV_INTX_ALL_STATUS;
	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
	writel(val, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	u32 val;

	val = readl(priv->base + PCL_RCV_INTX);
	val &= ~PCL_RCV_INTX_ALL_MASK;
	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
	writel(val, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	u32 val;

	val = readl(priv->base + PCL_RCV_INTX);
	val &= ~PCL_RCV_INTX_ALL_MASK;
	val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
	writel(val, priv->base + PCL_RCV_INTX);
}

static struct irq_chip uniphier_pcie_irq_chip = {
	.name = "PCI",
	.irq_ack = uniphier_pcie_irq_ack,
	.irq_mask = uniphier_pcie_irq_mask,
	.irq_unmask = uniphier_pcie_irq_unmask,
};

static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops uniphier_intx_domain_ops = {
	.map = uniphier_pcie_intx_map,
};

static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
	struct pcie_port *pp = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long reg;
	u32 val, bit, virq;

	/* INT for debug */
	val = readl(priv->base + PCL_RCV_INT);

	if (val & PCL_CFG_BW_MGT_STATUS)
		dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
	if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
		dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
	if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
		dev_dbg(pci->dev, "Root Error\n");
	if (val & PCL_CFG_PME_MSI_STATUS)
		dev_dbg(pci->dev, "PME Interrupt\n");

	writel(val, priv->base + PCL_RCV_INT);

	/* INTx */
	chained_irq_enter(chip, desc);

	val = readl(priv->base + PCL_RCV_INTX);
	reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);

	for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
		virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
		generic_handle_irq(virq);
	}

	chained_irq_exit(chip, desc);
}

static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	struct device_node *np = pci->dev->of_node;
	struct device_node *np_intc;

	np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
	if (!np_intc) {
		dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
		return -EINVAL;
	}

	pp->irq = irq_of_parse_and_map(np_intc, 0);
	if (!pp->irq) {
		dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
		return -EINVAL;
	}

	priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
						&uniphier_intx_domain_ops, pp);
	if (!priv->legacy_irq_domain) {
		dev_err(pci->dev, "Failed to get INTx domain\n");
		return -ENODEV;
	}

	irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
					 pp);

	return 0;
}

static int uniphier_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
	int ret;

	ret = uniphier_pcie_config_legacy_irq(pp);
	if (ret)
		return ret;

	uniphier_pcie_irq_enable(priv);

	dw_pcie_setup_rc(pp);
	ret = uniphier_pcie_establish_link(pci);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
	.host_init = uniphier_pcie_host_init,
};

static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
				  struct platform_device *pdev)
{
	struct dw_pcie *pci = &priv->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	pp->ops = &uniphier_pcie_host_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
	}

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "Failed to initialize host (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto out_clk_disable;

	uniphier_pcie_init_rc(priv);

	ret = phy_init(priv->phy);
	if (ret)
		goto out_rst_assert;

	ret = uniphier_pcie_wait_rc(priv);
	if (ret)
		goto out_phy_exit;

	return 0;

out_phy_exit:
	phy_exit(priv->phy);
out_rst_assert:
	reset_control_assert(priv->rst);
out_clk_disable:
	clk_disable_unprepare(priv->clk);

	return ret;
}

static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
{
	uniphier_pcie_irq_disable(priv);
	phy_exit(priv->phy);
	reset_control_assert(priv->rst);
	clk_disable_unprepare(priv->clk);
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = uniphier_pcie_establish_link,
	.stop_link = uniphier_pcie_stop_link,
	.link_up = uniphier_pcie_link_up,
};

static int uniphier_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_pcie_priv *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pci.dev = dev;
	priv->pci.ops = &dw_pcie_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(priv->pci.dbi_base))
		return PTR_ERR(priv->pci.dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	priv->rst = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(priv->rst))
		return PTR_ERR(priv->rst);

	priv->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(priv->phy))
		return PTR_ERR(priv->phy);

	platform_set_drvdata(pdev, priv);

	ret = uniphier_pcie_host_enable(priv);
	if (ret)
		return ret;

	return uniphier_add_pcie_port(priv, pdev);
}

static int uniphier_pcie_remove(struct platform_device *pdev)
{
	struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);

	uniphier_pcie_host_disable(priv);

	return 0;
}

static const struct of_device_id uniphier_pcie_match[] = {
	{ .compatible = "socionext,uniphier-pcie", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_pcie_match);

static struct platform_driver uniphier_pcie_driver = {
	.probe = uniphier_pcie_probe,
	.remove = uniphier_pcie_remove,
	.driver = {
		.name = "uniphier-pcie",
		.of_match_table = uniphier_pcie_match,
	},
};
builtin_platform_driver(uniphier_pcie_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
MODULE_LICENSE("GPL v2");
@@ -161,7 +161,6 @@ struct mtk_pcie_soc {
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @lane: lane count
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
@@ -182,7 +181,6 @@ struct mtk_pcie_port {
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 lane;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
@@ -197,29 +195,20 @@ struct mtk_pcie_port {
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @free_ck: free-run reference clock
 * @io: IO resource
 * @pio: PIO resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: IO / Memory offset
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 * @busnr: root bus number
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *free_ck;

	struct resource io;
	struct resource pio;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	const struct mtk_pcie_soc *soc;
	unsigned int busnr;
};

static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
@@ -904,12 +893,6 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
	if (!port)
		return -ENOMEM;

	err = of_property_read_u32(node, "num-lanes", &port->lane);
	if (err) {
		dev_err(dev, "missing num-lanes property\n");
		return err;
	}

	snprintf(name, sizeof(name), "port%d", slot);
	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	port->base = devm_ioremap_resource(dev, regs);
@@ -1045,55 +1028,43 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	struct mtk_pcie_port *port, *tmp;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct resource_entry *win, *tmp_win;
	resource_size_t io_base;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    windows, &io_base);
	if (err)
		return err;

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, node, &res);
		if (err < 0)
			return err;
	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp_win, windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = node->full_name;

			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			err = devm_pci_remap_iospace(dev, win->res, io_base);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, win->res);
				resource_list_destroy_entry(win);
			}
			break;

		case IORESOURCE_MEM:
			pcie->offset.mem = res.start - range.pci_addr;

			memcpy(&pcie->mem, &res, sizeof(res));
			memcpy(&pcie->mem, win->res, sizeof(*win->res));
			pcie->mem.name = "non-prefetchable";
			break;
		case IORESOURCE_BUS:
			pcie->busnr = win->res->start;
			break;
		}
	}

	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	for_each_available_child_of_node(node, child) {
		int slot;

@@ -1125,28 +1096,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
	return 0;
}

static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct device *dev = pcie->dev;
	int err;

	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
	pci_add_resource(windows, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

	err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
	if (err)
		return err;

	return 0;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
@@ -1169,11 +1118,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
	if (err)
		return err;

	err = mtk_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	host->busnr = pcie->busn.start;
	host->busnr = pcie->busnr;
	host->dev.parent = pcie->dev;
	host->ops = pcie->soc->ops;
	host->map_irq = of_irq_parse_and_map_pci;
@@ -252,6 +252,27 @@ int __weak pcibios_sriov_disable(struct pci_dev *pdev)
	return 0;
}

static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
{
	unsigned int i;
	int rc;

	if (dev->no_vf_scan)
		return 0;

	for (i = 0; i < num_vfs; i++) {
		rc = pci_iov_add_virtfn(dev, i);
		if (rc)
			goto failed;
	}
	return 0;
failed:
	while (i--)
		pci_iov_remove_virtfn(dev, i);

	return rc;
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
@@ -337,21 +358,15 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
	msleep(100);
	pci_cfg_access_unlock(dev);

	for (i = 0; i < initial; i++) {
		rc = pci_iov_add_virtfn(dev, i);
		if (rc)
			goto failed;
	}
	rc = sriov_add_vfs(dev, initial);
	if (rc)
		goto err_pcibios;

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	while (i--)
		pci_iov_remove_virtfn(dev, i);

err_pcibios:
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
@@ -368,17 +383,26 @@ err_pcibios:
	return rc;
}

static void sriov_del_vfs(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;
	int i;

	if (dev->no_vf_scan)
		return;

	for (i = 0; i < iov->num_VFs; i++)
		pci_iov_remove_virtfn(dev, i);
}

static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	for (i = 0; i < iov->num_VFs; i++)
		pci_iov_remove_virtfn(dev, i);

	sriov_del_vfs(dev);
	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
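Both sriov_add_vfs() and sriov_del_vfs() above consult the new pci_dev::no_vf_scan bit, so a platform whose firmware already presents the VFs can skip the generic scan. A hypothetical sketch of an arch hook setting the bit (illustrative only; this is not the exact platform patch in this series):

int pcibios_add_device(struct pci_dev *pdev)
{
	/* Firmware enumerates VFs on this platform; tell the PCI core
	 * not to scan them after SR-IOV is enabled (illustrative).
	 */
	pdev->no_vf_scan = 1;
	return 0;
}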
@@ -416,7 +416,7 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider,
 *
 * Returns -1 if any of the clients are not compatible (behind the same
 * root port as the provider), otherwise returns a positive number where
 * a lower number is the preferrable choice. (If there's one client
 * a lower number is the preferable choice. (If there's one client
 * that's the same as the provider it will return 0, which is best choice).
 *
 * For now, "compatible" means the provider and the clients are all behind
@@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(pci_has_p2pmem);
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers are
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
@@ -574,7 +574,7 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Returns 0 on success
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
@@ -667,7 +667,7 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
 *
 * Published memory can be used by other PCI device drivers for
 * peer-2-peer DMA operations. Non-published memory is reserved for
 * exlusive use of the device driver that registers the peer-to-peer
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using it's full BDF device
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device and specifying a PCI device
@@ -778,7 +778,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
 * whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdme has been enabled
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
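The kernel-doc above describes how pci_p2pdma_enable_store() turns an attribute string into either a specific provider (full BDF name) or a plain boolean. A hedged sketch of a store handler using it; the context struct and handler name are made up for illustration (needs <linux/pci-p2pdma.h>):

struct example_ctx {			/* hypothetical consumer state */
	struct pci_dev *p2p_dev;
	bool use_p2pdma;
};

static ssize_t example_p2pmem_store(struct example_ctx *ctx,
				    const char *page, size_t count)
{
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pdma;
	int ret;

	ret = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pdma);
	if (ret)
		return ret;

	ctx->p2p_dev = p2p_dev;
	ctx->use_p2pdma = use_p2pdma;
	return count;
}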
@@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev)
		return 0;
	}

	if (!pm || !pm->runtime_suspend)
		return -ENOSYS;

	pci_dev->state_saved = false;
	error = pm->runtime_suspend(dev);
	if (error) {
	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);
		/*
		 * -EBUSY and -EAGAIN is used to request the runtime PM core
		 * to schedule a new suspend, so log the event only with debug
		 * log level.
		 */
		if (error == -EBUSY || error == -EAGAIN)
		if (error == -EBUSY || error == -EAGAIN) {
			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
				pm->runtime_suspend, error);
		else
			return error;
		} else if (error) {
			dev_err(dev, "can't suspend (%pf returned %d)\n",
				pm->runtime_suspend, error);

			return error;
		return error;
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
	if (pm && pm->runtime_suspend
	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			  "PCI PM: State of device not saved by %pF\n",
@@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev)

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc;
	int rc = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

@@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev)
	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_resume)
		return -ENOSYS;

	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	rc = pm->runtime_resume(dev);
	if (pm && pm->runtime_resume)
		rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

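The rewritten pci_pm_runtime_suspend()/_resume() paths above no longer bail out with -ENOSYS when a driver supplies no runtime callbacks; the PCI core handles the power transition itself. A hedged sketch of what this permits in a driver probe, using standard runtime-PM calls and no .runtime_suspend/.runtime_resume implementation (driver and device names are hypothetical; needs <linux/pm_runtime.h>):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pcim_enable_device(pdev);

	if (ret)
		return ret;

	/* Opt in to runtime PM without providing runtime callbacks:
	 * drop the usage count the PCI core holds and allow suspend.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;
}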
@@ -2,6 +2,8 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H

#include <linux/pci.h>

#define PCI_FIND_CAP_TTL 48

#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
@@ -53,8 +53,6 @@ struct pcie_link_state {
	struct pcie_link_state *root; /* pointer to the root port link */
	struct pcie_link_state *parent; /* pointer to the parent Link state */
	struct list_head sibling; /* node in link_list */
	struct list_head children; /* list of child link states */
	struct list_head link; /* node in parent's children list */

	/* ASPM state */
	u32 aspm_support:7; /* Supported ASPM state */
@@ -850,8 +848,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	INIT_LIST_HEAD(&link->children);
	INIT_LIST_HEAD(&link->link);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

@@ -877,7 +873,6 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)

	link->parent = parent;
	link->root = link->parent->root;
	list_add(&link->link, &parent->children);
	}

	list_add(&link->sibling, &link_list);
@@ -1001,7 +996,6 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
	/* All functions are removed, so just disable ASPM for the link */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	list_del(&link->link);
	/* Clock PM is for endpoint device */
	free_link_state(link);

@@ -71,19 +71,19 @@ static inline void *get_service_data(struct pcie_device *dev)

struct pcie_port_service_driver {
	const char *name;
	int (*probe) (struct pcie_device *dev);
	void (*remove) (struct pcie_device *dev);
	int (*suspend) (struct pcie_device *dev);
	int (*resume_noirq) (struct pcie_device *dev);
	int (*resume) (struct pcie_device *dev);
	int (*runtime_suspend) (struct pcie_device *dev);
	int (*runtime_resume) (struct pcie_device *dev);
	int (*probe)(struct pcie_device *dev);
	void (*remove)(struct pcie_device *dev);
	int (*suspend)(struct pcie_device *dev);
	int (*resume_noirq)(struct pcie_device *dev);
	int (*resume)(struct pcie_device *dev);
	int (*runtime_suspend)(struct pcie_device *dev);
	int (*runtime_resume)(struct pcie_device *dev);

	/* Device driver may resume normal operations */
	void (*error_resume)(struct pci_dev *dev);

	/* Link Reset Capability - AER service driver specific */
	pci_ers_result_t (*reset_link) (struct pci_dev *dev);
	pci_ers_result_t (*reset_link)(struct pci_dev *dev);

	int port_type; /* Type of the port this driver can handle */
	u32 service; /* Port service this device represents */
@@ -618,6 +618,30 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
			 quirk_amd_nl_class);

/*
 * Synopsys USB 3.x host HAPS platform has a class code of
 * PCI_CLASS_SERIAL_USB_XHCI, and xhci driver can claim it. However, these
 * devices should use dwc3-haps driver. Change these devices' class code to
 * PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from claiming
 * them.
 */
static void quirk_synopsys_haps(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	switch (pdev->device) {
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
		pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
			 class, pdev->class);
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
			 quirk_synopsys_haps);

/*
 * Let's make the southbridge information explicit instead of having to
 * worry about people probing the ACPI areas, for example.. (Yes, it
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -25,6 +25,11 @@ static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = 1;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

@@ -113,6 +118,19 @@ static void stuser_set_state(struct switchtec_user *stuser,

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
@@ -128,16 +146,18 @@ static void mrpc_cmd_submit(struct switchtec_dev *stdev)
	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
@@ -170,7 +190,11 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

@@ -180,13 +204,19 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
@@ -221,7 +251,10 @@ static void mrpc_timeout_work(struct work_struct *work)

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
@@ -229,7 +262,6 @@ static void mrpc_timeout_work(struct work_struct *work)
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
@@ -800,6 +832,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
@@ -821,7 +854,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
	else
		return -EINVAL;

	event_flags = ctl.flags;
	for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
		ctl.flags = event_flags;
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
@@ -1017,10 +1052,24 @@ static void enable_link_state_events(struct switchtec_dev *stdev)
	}
}

static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}

@@ -1176,10 +1225,27 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
	return ret;
}


static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	ret = IRQ_HANDLED;
	return ret;
}

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
@@ -1194,9 +1260,29 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}

static void init_pff(struct switchtec_dev *stdev)
@@ -1232,19 +1318,38 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
@@ -1262,6 +1367,19 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev,
					      sizeof(*stdev->dma_mrpc),
					      &stdev->dma_mrpc_dma_addr,
					      GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}

@@ -1293,6 +1411,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
		   &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;
@@ -1318,7 +1439,6 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
@@ -15,10 +15,6 @@
#include <linux/platform_device.h>
#include <linux/property.h>

#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf

/**
 * struct dwc3_haps - Driver private structure
 * @dwc3: child dwc3 platform_device
@@ -440,6 +440,7 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)

#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16)
#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)

@@ -413,6 +413,7 @@ struct pci_dev {
	unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
	unsigned int is_probed:1; /* Device probing in progress */
	unsigned int link_active_reporting:1; /* Device capable of reporting link active */
	unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
	pci_dev_flags_t dev_flags;
	atomic_t enable_cnt; /* pci_enable_device has been called */

@@ -772,9 +773,9 @@ struct pci_driver {
	int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
	int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
	int (*resume_early)(struct pci_dev *dev);
	int (*resume) (struct pci_dev *dev); /* Device woken up */
	void (*shutdown) (struct pci_dev *dev);
	int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
	int (*resume)(struct pci_dev *dev); /* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	struct device_driver driver;
@@ -2361,6 +2361,9 @@
#define PCI_DEVICE_ID_CENATEK_IDE 0x0001

#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf

#define PCI_VENDOR_ID_USR 0x16ec

@@ -29,6 +29,7 @@
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)

#define SWITCHTEC_DMA_MRPC_EN BIT(0)
enum {
	SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
@@ -46,6 +47,10 @@ struct mrpc_regs {
	u32 cmd;
	u32 status;
	u32 ret_value;
	u32 dma_en;
	u64 dma_addr;
	u32 dma_vector;
	u32 dma_ver;
} __packed;

enum mrpc_status {
@@ -342,6 +347,14 @@ struct pff_csr_regs {

struct switchtec_ntb;

struct dma_mrpc_output {
	u32 status;
	u32 cmd_id;
	u32 rtn_code;
	u32 output_size;
	u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
};

struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
@@ -381,6 +394,9 @@ struct switchtec_dev {
	u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];

	struct switchtec_ntb *sndev;

	struct dma_mrpc_output *dma_mrpc;
	dma_addr_t dma_mrpc_dma_addr;
};

static inline struct switchtec_dev *to_stdev(struct device *dev)