Merge remote-tracking branch 'spi/for-5.15' into spi-next
This commit is contained in: commit 6e9c846aa0
@ -19,7 +19,6 @@ properties:
  compatible:
    enum:
      - ibm,fsi2spi
      - ibm,fsi2spi-restricted

  reg:
    items:
@ -1,48 +0,0 @@
OMAP2+ McSPI device

Required properties:
- compatible :
  - "ti,am654-mcspi" for AM654.
  - "ti,omap2-mcspi" for OMAP2 & OMAP3.
  - "ti,omap4-mcspi" for OMAP4+.
- ti,spi-num-cs : Number of chipselect supported by the instance.
- ti,hwmods: Name of the hwmod associated to the McSPI
- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
			  input. The default is D0 as input and
			  D1 as output.

Optional properties:
- dmas: List of DMA specifiers with the controller specific format
	as described in the generic DMA client binding. A tx and rx
	specifier is required for each chip select.
- dma-names: List of DMA request names. These strings correspond
	1:1 with the DMA specifiers listed in dmas. The string naming
	is to be "rxN" and "txN" for RX and TX requests,
	respectively, where N equals the chip select number.

Examples:

[hwmod populated DMA resources]

mcspi1: mcspi@1 {
    #address-cells = <1>;
    #size-cells = <0>;
    compatible = "ti,omap4-mcspi";
    ti,hwmods = "mcspi1";
    ti,spi-num-cs = <4>;
};

[generic DMA request binding]

mcspi1: mcspi@1 {
    #address-cells = <1>;
    #size-cells = <0>;
    compatible = "ti,omap4-mcspi";
    ti,hwmods = "mcspi1";
    ti,spi-num-cs = <2>;
    dmas = <&edma 42
	    &edma 43
	    &edma 44
	    &edma 45>;
    dma-names = "tx0", "rx0", "tx1", "rx1";
};
117 Documentation/devicetree/bindings/spi/omap-spi.yaml (Normal file)
@ -0,0 +1,117 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/spi/omap-spi.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: SPI controller bindings for OMAP and K3 SoCs
|
||||
|
||||
maintainers:
|
||||
- Aswath Govindraju <a-govindraju@ti.com>
|
||||
|
||||
allOf:
|
||||
- $ref: spi-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
oneOf:
|
||||
- items:
|
||||
- enum:
|
||||
- ti,am654-mcspi
|
||||
- ti,am4372-mcspi
|
||||
- const: ti,omap4-mcspi
|
||||
- items:
|
||||
- enum:
|
||||
- ti,omap2-mcspi
|
||||
- ti,omap4-mcspi
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
ti,spi-num-cs:
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
description: Number of chipselects supported by the instance.
|
||||
minimum: 1
|
||||
maximum: 4
|
||||
|
||||
ti,hwmods:
|
||||
$ref: /schemas/types.yaml#/definitions/string
|
||||
description:
|
||||
Must be "mcspi<n>", n being the instance number (1-based).
|
||||
This property is applicable only on legacy platforms mainly omap2/3
|
||||
and ti81xx and should not be used on other platforms.
|
||||
deprecated: true
|
||||
|
||||
ti,pindir-d0-out-d1-in:
|
||||
description:
|
||||
Select the D0 pin as output and D1 as input. The default is D0
|
||||
as input and D1 as output.
|
||||
type: boolean
|
||||
|
||||
dmas:
|
||||
description:
|
||||
List of DMA specifiers with the controller specific format as
|
||||
described in the generic DMA client binding. A tx and rx
|
||||
specifier is required for each chip select.
|
||||
minItems: 1
|
||||
maxItems: 8
|
||||
|
||||
dma-names:
|
||||
description:
|
||||
List of DMA request names. These strings correspond 1:1 with
the DMA specifiers listed in dmas. The string naming is to be
"rxN" and "txN" for RX and TX requests, respectively, where N
is the chip select number.
|
||||
minItems: 1
|
||||
maxItems: 8
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
if:
|
||||
properties:
|
||||
compatible:
|
||||
oneOf:
|
||||
- const: ti,omap2-mcspi
|
||||
- const: ti,omap4-mcspi
|
||||
|
||||
then:
|
||||
properties:
|
||||
ti,hwmods:
|
||||
items:
|
||||
- pattern: "^mcspi([1-9])$"
|
||||
|
||||
else:
|
||||
properties:
|
||||
ti,hwmods: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/soc/ti,sci_pm_domain.h>
|
||||
|
||||
spi@2100000 {
|
||||
compatible = "ti,am654-mcspi","ti,omap4-mcspi";
|
||||
reg = <0x2100000 0x400>;
|
||||
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&k3_clks 137 1>;
|
||||
power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
|
||||
dma-names = "tx0", "rx0";
|
||||
};
|
91 Documentation/devicetree/bindings/spi/rockchip-sfc.yaml (Normal file)
@ -0,0 +1,91 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/spi/rockchip-sfc.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Rockchip Serial Flash Controller (SFC)
|
||||
|
||||
maintainers:
|
||||
- Heiko Stuebner <heiko@sntech.de>
|
||||
- Chris Morgan <macromorgan@hotmail.com>
|
||||
|
||||
allOf:
|
||||
- $ref: spi-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: rockchip,sfc
|
||||
description:
|
||||
The Rockchip SFC controller is a standalone IP with a version register,
and the driver can handle all the feature differences inside the IP
depending on the version register.
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Bus Clock
|
||||
- description: Module Clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: clk_sfc
|
||||
- const: hclk_sfc
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
rockchip,sfc-no-dma:
|
||||
description: Disable DMA and utilize FIFO mode only
|
||||
type: boolean
|
||||
|
||||
patternProperties:
|
||||
"^flash@[0-3]$":
|
||||
type: object
|
||||
properties:
|
||||
reg:
|
||||
minimum: 0
|
||||
maximum: 3
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/px30-cru.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/px30-power.h>
|
||||
|
||||
sfc: spi@ff3a0000 {
|
||||
compatible = "rockchip,sfc";
|
||||
reg = <0xff3a0000 0x4000>;
|
||||
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
|
||||
clock-names = "clk_sfc", "hclk_sfc";
|
||||
pinctrl-0 = <&sfc_clk &sfc_cs &sfc_bus2>;
|
||||
pinctrl-names = "default";
|
||||
power-domains = <&power PX30_PD_MMC_NAND>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
flash@0 {
|
||||
compatible = "jedec,spi-nor";
|
||||
reg = <0>;
|
||||
spi-max-frequency = <108000000>;
|
||||
spi-rx-bus-width = <2>;
|
||||
spi-tx-bus-width = <2>;
|
||||
};
|
||||
};
|
||||
|
||||
...
|
@ -11,6 +11,7 @@ Required properties:
|
||||
- mediatek,mt8135-spi: for mt8135 platforms
|
||||
- mediatek,mt8173-spi: for mt8173 platforms
|
||||
- mediatek,mt8183-spi: for mt8183 platforms
|
||||
- mediatek,mt6893-spi: for mt6893 platforms
|
||||
- "mediatek,mt8192-spi", "mediatek,mt6765-spi": for mt8192 platforms
|
||||
- "mediatek,mt8195-spi", "mediatek,mt6765-spi": for mt8195 platforms
|
||||
- "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
|
||||
|
@ -1,63 +0,0 @@
|
||||
Spreadtrum ADI controller
|
||||
|
||||
ADI is the abbreviation of Anolog-Digital interface, which is used to access
|
||||
analog chip (such as PMIC) from digital chip. ADI controller follows the SPI
|
||||
framework for its hardware implementation is alike to SPI bus and its timing
|
||||
is compatile to SPI timing.
|
||||
|
||||
ADI controller has 50 channels including 2 software read/write channels and
|
||||
48 hardware channels to access analog chip. For 2 software read/write channels,
|
||||
users should set ADI registers to access analog chip. For hardware channels,
|
||||
we can configure them to allow other hardware components to use it independently,
|
||||
which means we can just link one analog chip address to one hardware channel,
|
||||
then users can access the mapped analog chip address by this hardware channel
|
||||
triggered by hardware components instead of ADI software channels.
|
||||
|
||||
Thus we introduce one property named "sprd,hw-channels" to configure hardware
|
||||
channels, the first value specifies the hardware channel id which is used to
|
||||
transfer data triggered by hardware automatically, and the second value specifies
|
||||
the analog chip address where user want to access by hardware components.
|
||||
|
||||
Since we have multi-subsystems will use unique ADI to access analog chip, when
|
||||
one system is reading/writing data by ADI software channels, that should be under
|
||||
one hardware spinlock protection to prevent other systems from reading/writing
|
||||
data by ADI software channels at the same time, or two parallel routine of setting
|
||||
ADI registers will make ADI controller registers chaos to lead incorrect results.
|
||||
Then we need one hardware spinlock to synchronize between the multiple subsystems.
|
||||
|
||||
The new version ADI controller supplies multiple master channels for different
|
||||
subsystem accessing, that means no need to add hardware spinlock to synchronize,
|
||||
thus change the hardware spinlock support to be optional to keep backward
|
||||
compatibility.
|
||||
|
||||
Required properties:
|
||||
- compatible: Should be "sprd,sc9860-adi".
|
||||
- reg: Offset and length of ADI-SPI controller register space.
|
||||
- #address-cells: Number of cells required to define a chip select address
|
||||
on the ADI-SPI bus. Should be set to 1.
|
||||
- #size-cells: Size of cells required to define a chip select address size
|
||||
on the ADI-SPI bus. Should be set to 0.
|
||||
|
||||
Optional properties:
|
||||
- hwlocks: Reference to a phandle of a hwlock provider node.
|
||||
- hwlock-names: Reference to hwlock name strings defined in the same order
|
||||
as the hwlocks, should be "adi".
|
||||
- sprd,hw-channels: This is an array of channel values up to 49 channels.
|
||||
The first value specifies the hardware channel id which is used to
|
||||
transfer data triggered by hardware automatically, and the second
|
||||
value specifies the analog chip address where user want to access
|
||||
by hardware components.
|
||||
|
||||
SPI slave nodes must be children of the SPI controller node and can contain
|
||||
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
|
||||
|
||||
Example:
|
||||
adi_bus: spi@40030000 {
|
||||
compatible = "sprd,sc9860-adi";
|
||||
reg = <0 0x40030000 0 0x10000>;
|
||||
hwlocks = <&hwlock1 0>;
|
||||
hwlock-names = "adi";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
sprd,hw-channels = <30 0x8c20>;
|
||||
};
|
104 Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml (Normal file)
@ -0,0 +1,104 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: "http://devicetree.org/schemas/spi/sprd,spi-adi.yaml#"
|
||||
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
|
||||
|
||||
title: Spreadtrum ADI controller
|
||||
|
||||
maintainers:
|
||||
- Orson Zhai <orsonzhai@gmail.com>
|
||||
- Baolin Wang <baolin.wang7@gmail.com>
|
||||
- Chunyan Zhang <zhang.lyra@gmail.com>
|
||||
|
||||
description: |
|
||||
ADI is the abbreviation of Analog-Digital interface, which is used to access
an analog chip (such as a PMIC) from a digital chip. The ADI controller follows
the SPI framework because its hardware implementation is similar to an SPI bus
and its timing is compatible with SPI timing.

The ADI controller has 50 channels, including 2 software read/write channels and
48 hardware channels, to access the analog chip. For the 2 software read/write
channels, users should set ADI registers to access the analog chip. The hardware
channels can be configured so that other hardware components use them
independently: one analog chip address is linked to one hardware channel, and
the mapped analog chip address is then accessed through that hardware channel,
triggered by hardware components instead of the ADI software channels.

Thus we introduce one property named "sprd,hw-channels" to configure hardware
channels: the first value specifies the hardware channel id which is used to
transfer data triggered by hardware automatically, and the second value
specifies the analog chip address which the hardware components access.

Since multiple subsystems use the same ADI to access the analog chip, any
reading or writing through the ADI software channels must be done under one
hardware spinlock to prevent other subsystems from using the software channels
at the same time; otherwise two parallel routines setting the ADI registers
would corrupt the controller state and lead to incorrect results. One hardware
spinlock is therefore needed to synchronize the multiple subsystems.

The new version of the ADI controller supplies multiple master channels for
the different subsystems, so no hardware spinlock is needed to synchronize;
the hardware spinlock support is therefore made optional to keep backward
compatibility.
|
||||
|
||||
allOf:
|
||||
- $ref: /spi/spi-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- sprd,sc9860-adi
|
||||
- sprd,sc9863-adi
|
||||
- sprd,ums512-adi
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
hwlocks:
|
||||
maxItems: 1
|
||||
|
||||
hwlock-names:
|
||||
const: adi
|
||||
|
||||
sprd,hw-channels:
|
||||
$ref: /schemas/types.yaml#/definitions/uint32-matrix
|
||||
description: A list of hardware channels
|
||||
minItems: 1
|
||||
maxItems: 48
|
||||
items:
|
||||
items:
|
||||
- description: The hardware channel id which is used to transfer data
triggered by hardware automatically; channel ids 0-1 are for software
use, 2-49 are hardware channels.
|
||||
minimum: 2
|
||||
maximum: 49
|
||||
- description: The analog chip address which the user wants to access
through hardware components.
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- '#address-cells'
|
||||
- '#size-cells'
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
aon {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
adi_bus: spi@40030000 {
|
||||
compatible = "sprd,sc9860-adi";
|
||||
reg = <0 0x40030000 0 0x10000>;
|
||||
hwlocks = <&hwlock1 0>;
|
||||
hwlock-names = "adi";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
sprd,hw-channels = <30 0x8c20>;
|
||||
};
|
||||
};
|
||||
...
|
@ -658,6 +658,18 @@ config SPI_ROCKCHIP
|
||||
The main usecase of this controller is to use spi flash as boot
|
||||
device.
|
||||
|
||||
config SPI_ROCKCHIP_SFC
|
||||
tristate "Rockchip Serial Flash Controller (SFC)"
|
||||
depends on ARCH_ROCKCHIP || COMPILE_TEST
|
||||
depends on HAS_IOMEM && HAS_DMA
|
||||
help
|
||||
This enables support for the Rockchip serial flash controller. This
is a specialized controller used to access SPI flash on some
Rockchip SoCs.
|
||||
|
||||
The Rockchip SFC supports DMA and PIO modes. When DMA is not available,
the driver automatically falls back to PIO mode.
|
||||
|
||||
config SPI_RB4XX
|
||||
tristate "Mikrotik RB4XX SPI master"
|
||||
depends on SPI_MASTER && ATH79
|
||||
|
@ -95,6 +95,7 @@ obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
|
||||
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
|
||||
obj-$(CONFIG_SPI_QUP) += spi-qup.o
|
||||
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
|
||||
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
|
||||
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
|
||||
obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
|
||||
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
|
||||
|
@ -143,12 +143,12 @@ static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
|
||||
}
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
||||
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
|
||||
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
|
||||
{
|
||||
return readl(bs->regs + reg);
|
||||
}
|
||||
|
||||
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
|
||||
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
|
||||
u32 val)
|
||||
{
|
||||
writel(val, bs->regs + reg);
|
||||
|
@ -550,7 +550,7 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
ret = clk_enable(espi->clk);
|
||||
ret = clk_prepare_enable(espi->clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -570,7 +570,7 @@ static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
|
||||
val &= ~SSPCR1_SSE;
|
||||
writel(val, espi->mmio + SSPCR1);
|
||||
|
||||
clk_disable(espi->clk);
|
||||
clk_disable_unprepare(espi->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -25,16 +25,11 @@
|
||||
|
||||
#define SPI_FSI_BASE 0x70000
|
||||
#define SPI_FSI_INIT_TIMEOUT_MS 1000
|
||||
#define SPI_FSI_MAX_XFR_SIZE 2048
|
||||
#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 8
|
||||
#define SPI_FSI_MAX_RX_SIZE 8
|
||||
#define SPI_FSI_MAX_TX_SIZE 40
|
||||
|
||||
#define SPI_FSI_ERROR 0x0
|
||||
#define SPI_FSI_COUNTER_CFG 0x1
|
||||
#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
|
||||
#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
|
||||
#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
|
||||
#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
|
||||
#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
|
||||
#define SPI_FSI_CFG1 0x2
|
||||
#define SPI_FSI_CLOCK_CFG 0x3
|
||||
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
|
||||
@ -76,8 +71,6 @@ struct fsi_spi {
|
||||
struct device *dev; /* SPI controller device */
|
||||
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
|
||||
u32 base;
|
||||
size_t max_xfr_size;
|
||||
bool restricted;
|
||||
};
|
||||
|
||||
struct fsi_spi_sequence {
|
||||
@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
|
||||
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
|
||||
}
|
||||
|
||||
static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
||||
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
||||
{
|
||||
/*
|
||||
* Add the next byte of instruction to the 8-byte sequence register.
|
||||
@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
|
||||
*/
|
||||
seq->data |= (u64)val << seq->bit;
|
||||
seq->bit -= 8;
|
||||
|
||||
return ((64 - seq->bit) / 8) - 2;
|
||||
}
|
||||
|
||||
static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
|
||||
@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
|
||||
seq->data = 0ULL;
|
||||
}
|
||||
|
||||
static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
|
||||
struct fsi_spi_sequence *seq,
|
||||
struct spi_transfer *transfer)
|
||||
{
|
||||
int loops;
|
||||
int idx;
|
||||
int rc;
|
||||
u8 val = 0;
|
||||
u8 len = min(transfer->len, 8U);
|
||||
u8 rem = transfer->len % len;
|
||||
|
||||
loops = transfer->len / len;
|
||||
|
||||
if (transfer->tx_buf) {
|
||||
val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
|
||||
idx = fsi_spi_sequence_add(seq, val);
|
||||
|
||||
if (rem)
|
||||
rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
|
||||
} else if (transfer->rx_buf) {
|
||||
val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
|
||||
idx = fsi_spi_sequence_add(seq, val);
|
||||
|
||||
if (rem)
|
||||
rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ctx->restricted && loops > 1) {
|
||||
dev_warn(ctx->dev,
|
||||
"Transfer too large; no branches permitted.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (loops > 1) {
|
||||
u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
|
||||
|
||||
fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
|
||||
|
||||
if (transfer->rx_buf)
|
||||
cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
|
||||
SPI_FSI_COUNTER_CFG_N2_TX |
|
||||
SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
|
||||
SPI_FSI_COUNTER_CFG_N2_RELOAD;
|
||||
|
||||
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
|
||||
if (rc)
|
||||
return rc;
|
||||
} else {
|
||||
fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
|
||||
}
|
||||
|
||||
if (rem)
|
||||
fsi_spi_sequence_add(seq, rem);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
|
||||
struct spi_transfer *transfer)
|
||||
{
|
||||
int rc = 0;
|
||||
u64 status = 0ULL;
|
||||
u64 cfg = 0ULL;
|
||||
|
||||
if (transfer->tx_buf) {
|
||||
int nb;
|
||||
@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
|
||||
u64 in = 0ULL;
|
||||
u8 *rx = transfer->rx_buf;
|
||||
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
|
||||
rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
while (transfer->len > recv) {
|
||||
do {
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
|
||||
@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
|
||||
}
|
||||
} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
|
||||
|
||||
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
|
||||
if (rc)
|
||||
return rc;
|
||||
@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
{
|
||||
int rc;
|
||||
u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
|
||||
unsigned int len;
|
||||
struct spi_transfer *transfer;
|
||||
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
|
||||
|
||||
@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
struct spi_transfer *next = NULL;
|
||||
|
||||
/* Sequencer must do shift out (tx) first. */
|
||||
if (!transfer->tx_buf ||
|
||||
transfer->len > (ctx->max_xfr_size + 8)) {
|
||||
if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
fsi_spi_sequence_init(&seq);
|
||||
fsi_spi_sequence_add(&seq, seq_slave);
|
||||
|
||||
rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
|
||||
if (rc)
|
||||
goto error;
|
||||
len = transfer->len;
|
||||
while (len > 8) {
|
||||
fsi_spi_sequence_add(&seq,
|
||||
SPI_FSI_SEQUENCE_SHIFT_OUT(8));
|
||||
len -= 8;
|
||||
}
|
||||
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
|
||||
|
||||
if (!list_is_last(&transfer->transfer_list,
|
||||
&mesg->transfers)) {
|
||||
@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
|
||||
/* Sequencer can only do shift in (rx) after tx. */
|
||||
if (next->rx_buf) {
|
||||
if (next->len > ctx->max_xfr_size) {
|
||||
u8 shift;
|
||||
|
||||
if (next->len > SPI_FSI_MAX_RX_SIZE) {
|
||||
rc = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
|
||||
dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
|
||||
next->len);
|
||||
|
||||
rc = fsi_spi_sequence_transfer(ctx, &seq,
|
||||
next);
|
||||
if (rc)
|
||||
goto error;
|
||||
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
|
||||
fsi_spi_sequence_add(&seq, shift);
|
||||
} else {
|
||||
next = NULL;
|
||||
}
|
||||
@ -541,9 +470,7 @@ error:
|
||||
|
||||
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
|
||||
{
|
||||
struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
|
||||
|
||||
return ctx->max_xfr_size;
|
||||
return SPI_FSI_MAX_RX_SIZE;
|
||||
}
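With the sequencer rework above, the driver enforces fixed per-direction limits instead of the old max_xfr_size field: shift-out (tx) transfers are bounded by SPI_FSI_MAX_TX_SIZE and shift-in (rx) transfers by SPI_FSI_MAX_RX_SIZE. A minimal sketch of that bound, using a hypothetical helper name that is not part of the driver:

/* Sketch only: hypothetical helper illustrating the per-direction length
 * checks done in fsi_spi_transfer_one_message(); not part of the driver.
 */
static bool fsi_spi_len_ok(const struct spi_transfer *t, bool is_tx)
{
	return t->len <= (is_tx ? SPI_FSI_MAX_TX_SIZE : SPI_FSI_MAX_RX_SIZE);
}

fsi_spi_max_transfer_size() now reports SPI_FSI_MAX_RX_SIZE, presumably because it is the tighter of the two limits a client transfer can hit.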
|
||||
|
||||
static int fsi_spi_probe(struct device *dev)
|
||||
@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
|
||||
ctx->fsi = fsi;
|
||||
ctx->base = base + SPI_FSI_BASE;
|
||||
|
||||
if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
|
||||
ctx->restricted = true;
|
||||
ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
|
||||
} else {
|
||||
ctx->restricted = false;
|
||||
ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
|
||||
}
|
||||
|
||||
rc = devm_spi_register_controller(dev, ctlr);
|
||||
if (rc)
|
||||
spi_controller_put(ctlr);
|
||||
|
@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
|
||||
goto err_rx_dma_buf;
|
||||
}
|
||||
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
cfg.src_addr = phy_addr + SPI_POPR;
|
||||
cfg.dst_addr = phy_addr + SPI_PUSHR;
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
|
@ -549,12 +549,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
|
||||
*/
|
||||
spin_lock_irq(&mas->lock);
|
||||
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
|
||||
|
||||
/*
|
||||
* TX_WATERMARK_REG should be set after SPI configuration and
|
||||
* setting up GENI SE engine, as driver starts data transfer
|
||||
* for the watermark interrupt.
|
||||
*/
|
||||
if (m_cmd & SPI_TX_ONLY) {
|
||||
if (geni_spi_handle_tx(mas))
|
||||
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
|
||||
|
@ -1052,12 +1052,8 @@ static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
|
||||
|
||||
static void spi_imx_push(struct spi_imx_data *spi_imx)
|
||||
{
|
||||
unsigned int burst_len, fifo_words;
|
||||
unsigned int burst_len;
|
||||
|
||||
if (spi_imx->dynamic_burst)
|
||||
fifo_words = 4;
|
||||
else
|
||||
fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
|
||||
/*
|
||||
* Reload the FIFO when the remaining bytes to be transferred in the
|
||||
* current burst is 0. This only applies when bits_per_word is a
|
||||
@ -1076,7 +1072,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
|
||||
|
||||
spi_imx->remainder = burst_len;
|
||||
} else {
|
||||
spi_imx->remainder = fifo_words;
|
||||
spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1084,8 +1080,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
|
||||
if (!spi_imx->count)
|
||||
break;
|
||||
if (spi_imx->dynamic_burst &&
|
||||
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
|
||||
fifo_words))
|
||||
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
|
||||
break;
|
||||
spi_imx->tx(spi_imx);
|
||||
spi_imx->txfifo++;
|
||||
@ -1195,6 +1190,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
|
||||
* dynamic_burst in that case.
|
||||
*/
|
||||
if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
|
||||
!(spi->mode & SPI_CS_WORD) &&
|
||||
(spi_imx->bits_per_word == 8 ||
|
||||
spi_imx->bits_per_word == 16 ||
|
||||
spi_imx->bits_per_word == 32)) {
|
||||
@ -1630,6 +1626,15 @@ static int spi_imx_probe(struct platform_device *pdev)
|
||||
is_imx53_ecspi(spi_imx))
|
||||
spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
|
||||
|
||||
if (is_imx51_ecspi(spi_imx) &&
|
||||
device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
|
||||
/*
|
||||
* When using HW-CS implementing SPI_CS_WORD can be done by just
|
||||
* setting the burst length to the word size. This is
|
||||
* considerably faster than manually controlling the CS.
|
||||
*/
|
||||
spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
|
||||
|
||||
spi_imx->spi_drctl = spi_drctl;
|
||||
|
||||
init_completion(&spi_imx->xfer_done);
|
||||
|
@ -42,8 +42,9 @@
|
||||
#define SPI_CFG1_CS_IDLE_OFFSET 0
|
||||
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
|
||||
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
|
||||
#define SPI_CFG1_GET_TICK_DLY_OFFSET 30
|
||||
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
|
||||
|
||||
#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
|
||||
#define SPI_CFG1_CS_IDLE_MASK 0xff
|
||||
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
|
||||
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
|
||||
@ -90,6 +91,8 @@ struct mtk_spi_compatible {
|
||||
bool enhance_timing;
|
||||
/* some IC support DMA addr extension */
|
||||
bool dma_ext;
|
||||
/* some ICs do not need to unprepare the SPI clk */
|
||||
bool no_need_unprepare;
|
||||
};
|
||||
|
||||
struct mtk_spi {
|
||||
@ -104,6 +107,7 @@ struct mtk_spi {
|
||||
struct scatterlist *tx_sgl, *rx_sgl;
|
||||
u32 tx_sgl_len, rx_sgl_len;
|
||||
const struct mtk_spi_compatible *dev_comp;
|
||||
u32 spi_clk_hz;
|
||||
};
|
||||
|
||||
static const struct mtk_spi_compatible mtk_common_compat;
|
||||
@ -135,12 +139,21 @@ static const struct mtk_spi_compatible mt8183_compat = {
|
||||
.enhance_timing = true,
|
||||
};
|
||||
|
||||
static const struct mtk_spi_compatible mt6893_compat = {
|
||||
.need_pad_sel = true,
|
||||
.must_tx = true,
|
||||
.enhance_timing = true,
|
||||
.dma_ext = true,
|
||||
.no_need_unprepare = true,
|
||||
};
|
||||
|
||||
/*
|
||||
* A piece of default chip info unless the platform
|
||||
* supplies it.
|
||||
*/
|
||||
static const struct mtk_chip_config mtk_default_chip_info = {
|
||||
.sample_sel = 0,
|
||||
.tick_delay = 0,
|
||||
};
|
||||
|
||||
static const struct of_device_id mtk_spi_of_match[] = {
|
||||
@ -174,6 +187,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
|
||||
{ .compatible = "mediatek,mt8192-spi",
|
||||
.data = (void *)&mt6765_compat,
|
||||
},
|
||||
{ .compatible = "mediatek,mt6893-spi",
|
||||
.data = (void *)&mt6893_compat,
|
||||
},
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
|
||||
@ -192,6 +208,65 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
|
||||
writel(reg_val, mdata->base + SPI_CMD_REG);
|
||||
}
|
||||
|
||||
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
|
||||
{
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
|
||||
struct spi_delay *cs_setup = &spi->cs_setup;
|
||||
struct spi_delay *cs_hold = &spi->cs_hold;
|
||||
struct spi_delay *cs_inactive = &spi->cs_inactive;
|
||||
u32 setup, hold, inactive;
|
||||
u32 reg_val;
|
||||
int delay;
|
||||
|
||||
delay = spi_delay_to_ns(cs_setup, NULL);
|
||||
if (delay < 0)
|
||||
return delay;
|
||||
setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
|
||||
|
||||
delay = spi_delay_to_ns(cs_hold, NULL);
|
||||
if (delay < 0)
|
||||
return delay;
|
||||
hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
|
||||
|
||||
delay = spi_delay_to_ns(cs_inactive, NULL);
|
||||
if (delay < 0)
|
||||
return delay;
|
||||
inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
|
||||
|
||||
setup = setup ? setup : 1;
|
||||
hold = hold ? hold : 1;
|
||||
inactive = inactive ? inactive : 1;
|
||||
|
||||
reg_val = readl(mdata->base + SPI_CFG0_REG);
|
||||
if (mdata->dev_comp->enhance_timing) {
|
||||
hold = min_t(u32, hold, 0x10000);
|
||||
setup = min_t(u32, setup, 0x10000);
|
||||
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val |= (((hold - 1) & 0xffff)
|
||||
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
|
||||
reg_val |= (((setup - 1) & 0xffff)
|
||||
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
|
||||
} else {
|
||||
hold = min_t(u32, hold, 0x100);
|
||||
setup = min_t(u32, setup, 0x100);
|
||||
reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
|
||||
reg_val |= (((setup - 1) & 0xff)
|
||||
<< SPI_CFG0_CS_SETUP_OFFSET);
|
||||
}
|
||||
writel(reg_val, mdata->base + SPI_CFG0_REG);
|
||||
|
||||
inactive = min_t(u32, inactive, 0x100);
|
||||
reg_val = readl(mdata->base + SPI_CFG1_REG);
|
||||
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
|
||||
reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
|
||||
writel(reg_val, mdata->base + SPI_CFG1_REG);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mtk_spi_prepare_message(struct spi_master *master,
|
||||
struct spi_message *msg)
|
||||
{
|
||||
@ -261,6 +336,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
|
||||
writel(mdata->pad_sel[spi->chip_select],
|
||||
mdata->base + SPI_PAD_SEL_REG);
|
||||
|
||||
/* tick delay */
|
||||
reg_val = readl(mdata->base + SPI_CFG1_REG);
|
||||
reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
|
||||
reg_val |= ((chip_config->tick_delay & 0x7)
|
||||
<< SPI_CFG1_GET_TICK_DLY_OFFSET);
|
||||
writel(reg_val, mdata->base + SPI_CFG1_REG);
|
||||
|
||||
/* set hw cs timing */
|
||||
mtk_spi_set_hw_cs_timing(spi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -287,12 +371,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
|
||||
static void mtk_spi_prepare_transfer(struct spi_master *master,
|
||||
struct spi_transfer *xfer)
|
||||
{
|
||||
u32 spi_clk_hz, div, sck_time, reg_val;
|
||||
u32 div, sck_time, reg_val;
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(master);
|
||||
|
||||
spi_clk_hz = clk_get_rate(mdata->spi_clk);
|
||||
if (xfer->speed_hz < spi_clk_hz / 2)
|
||||
div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
|
||||
if (xfer->speed_hz < mdata->spi_clk_hz / 2)
|
||||
div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
|
||||
else
|
||||
div = 1;
|
||||
|
||||
@ -507,52 +590,6 @@ static bool mtk_spi_can_dma(struct spi_master *master,
|
||||
(unsigned long)xfer->rx_buf % 4 == 0);
|
||||
}
|
||||
|
||||
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
|
||||
struct spi_delay *setup,
|
||||
struct spi_delay *hold,
|
||||
struct spi_delay *inactive)
|
||||
{
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
|
||||
u16 setup_dly, hold_dly, inactive_dly;
|
||||
u32 reg_val;
|
||||
|
||||
if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
|
||||
(hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
|
||||
(inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
|
||||
dev_err(&spi->dev,
|
||||
"Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
setup_dly = setup ? setup->value : 1;
|
||||
hold_dly = hold ? hold->value : 1;
|
||||
inactive_dly = inactive ? inactive->value : 1;
|
||||
|
||||
reg_val = readl(mdata->base + SPI_CFG0_REG);
|
||||
if (mdata->dev_comp->enhance_timing) {
|
||||
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val |= (((hold_dly - 1) & 0xffff)
|
||||
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
|
||||
reg_val |= (((setup_dly - 1) & 0xffff)
|
||||
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
|
||||
} else {
|
||||
reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
|
||||
reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
|
||||
reg_val |= (((setup_dly - 1) & 0xff)
|
||||
<< SPI_CFG0_CS_SETUP_OFFSET);
|
||||
}
|
||||
writel(reg_val, mdata->base + SPI_CFG0_REG);
|
||||
|
||||
reg_val = readl(mdata->base + SPI_CFG1_REG);
|
||||
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
|
||||
reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
|
||||
writel(reg_val, mdata->base + SPI_CFG1_REG);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mtk_spi_setup(struct spi_device *spi)
|
||||
{
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
|
||||
@ -790,7 +827,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
|
||||
goto err_put_master;
|
||||
}
|
||||
|
||||
clk_disable_unprepare(mdata->spi_clk);
|
||||
mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
|
||||
|
||||
if (mdata->dev_comp->no_need_unprepare)
|
||||
clk_disable(mdata->spi_clk);
|
||||
else
|
||||
clk_disable_unprepare(mdata->spi_clk);
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
@ -858,6 +900,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
|
||||
|
||||
mtk_spi_reset(mdata);
|
||||
|
||||
if (mdata->dev_comp->no_need_unprepare)
|
||||
clk_unprepare(mdata->spi_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -906,7 +951,10 @@ static int mtk_spi_runtime_suspend(struct device *dev)
|
||||
struct spi_master *master = dev_get_drvdata(dev);
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(master);
|
||||
|
||||
clk_disable_unprepare(mdata->spi_clk);
|
||||
if (mdata->dev_comp->no_need_unprepare)
|
||||
clk_disable(mdata->spi_clk);
|
||||
else
|
||||
clk_disable_unprepare(mdata->spi_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -917,7 +965,10 @@ static int mtk_spi_runtime_resume(struct device *dev)
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(master);
|
||||
int ret;
|
||||
|
||||
ret = clk_prepare_enable(mdata->spi_clk);
|
||||
if (mdata->dev_comp->no_need_unprepare)
|
||||
ret = clk_enable(mdata->spi_clk);
|
||||
else
|
||||
ret = clk_prepare_enable(mdata->spi_clk);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
|
||||
return ret;
|
||||
|
@ -335,8 +335,10 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
|
||||
static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
|
||||
op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
|
||||
bool all_false;
|
||||
|
||||
if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
|
||||
op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
|
||||
return false;
|
||||
|
||||
if (op->data.nbytes && op->dummy.nbytes &&
|
||||
@ -346,7 +348,13 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
|
||||
if (op->addr.nbytes > 7)
|
||||
return false;
|
||||
|
||||
return spi_mem_default_supports_op(mem, op);
|
||||
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
|
||||
!op->data.dtr;
|
||||
|
||||
if (all_false)
|
||||
return spi_mem_default_supports_op(mem, op);
|
||||
else
|
||||
return spi_mem_dtr_supports_op(mem, op);
|
||||
}
|
||||
|
||||
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
|
||||
@ -355,14 +363,15 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
|
||||
struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
|
||||
int nio = 1, i, ret;
|
||||
u32 ss_ctrl;
|
||||
u8 addr[8];
|
||||
u8 opcode = op->cmd.opcode;
|
||||
u8 addr[8], cmd[2];
|
||||
|
||||
ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
|
||||
if (mem->spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
|
||||
nio = 8;
|
||||
else if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
|
||||
nio = 4;
|
||||
else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
|
||||
nio = 2;
|
||||
@ -374,19 +383,26 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
|
||||
mxic->regs + HC_CFG);
|
||||
writel(HC_EN_BIT, mxic->regs + HC_EN);
|
||||
|
||||
ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
|
||||
ss_ctrl = OP_CMD_BYTES(op->cmd.nbytes) |
|
||||
OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
|
||||
(op->cmd.dtr ? OP_CMD_DDR : 0);
|
||||
|
||||
if (op->addr.nbytes)
|
||||
ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
|
||||
OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
|
||||
OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
|
||||
(op->addr.dtr ? OP_ADDR_DDR : 0);
|
||||
|
||||
if (op->dummy.nbytes)
|
||||
ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
|
||||
|
||||
if (op->data.nbytes) {
|
||||
ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
|
||||
if (op->data.dir == SPI_MEM_DATA_IN)
|
||||
ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
|
||||
(op->data.dtr ? OP_DATA_DDR : 0);
|
||||
if (op->data.dir == SPI_MEM_DATA_IN) {
|
||||
ss_ctrl |= OP_READ;
|
||||
if (op->data.dtr)
|
||||
ss_ctrl |= OP_DQS_EN;
|
||||
}
|
||||
}
|
||||
|
||||
writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
|
||||
@ -394,7 +410,10 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
|
||||
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
|
||||
mxic->regs + HC_CFG);
|
||||
|
||||
ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
|
||||
for (i = 0; i < op->cmd.nbytes; i++)
|
||||
cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
|
||||
|
||||
ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -567,7 +586,8 @@ static int mxic_spi_probe(struct platform_device *pdev)
|
||||
master->bits_per_word_mask = SPI_BPW_MASK(8);
|
||||
master->mode_bits = SPI_CPOL | SPI_CPHA |
|
||||
SPI_RX_DUAL | SPI_TX_DUAL |
|
||||
SPI_RX_QUAD | SPI_TX_QUAD;
|
||||
SPI_RX_QUAD | SPI_TX_QUAD |
|
||||
SPI_RX_OCTAL | SPI_TX_OCTAL;
|
||||
|
||||
mxic_spi_hw_init(mxic);
|
||||
|
||||
|
@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
|
||||
struct dma_slave_config cfg;
|
||||
int ret;
|
||||
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
cfg.device_fc = true;
|
||||
cfg.src_addr = pic32s->dma_base + buf_offset;
|
||||
cfg.dst_addr = pic32s->dma_base + buf_offset;
|
||||
|
@ -594,24 +594,29 @@ static int u32_reader(struct driver_data *drv_data)
|
||||
|
||||
static void reset_sccr1(struct driver_data *drv_data)
|
||||
{
|
||||
struct chip_data *chip =
|
||||
spi_get_ctldata(drv_data->controller->cur_msg->spi);
|
||||
u32 sccr1_reg;
|
||||
u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
|
||||
struct chip_data *chip;
|
||||
|
||||
if (drv_data->controller->cur_msg) {
|
||||
chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
|
||||
threshold = chip->threshold;
|
||||
} else {
|
||||
threshold = 0;
|
||||
}
|
||||
|
||||
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
|
||||
switch (drv_data->ssp_type) {
|
||||
case QUARK_X1000_SSP:
|
||||
sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
|
||||
mask |= QUARK_X1000_SSCR1_RFT;
|
||||
break;
|
||||
case CE4100_SSP:
|
||||
sccr1_reg &= ~CE4100_SSCR1_RFT;
|
||||
mask |= CE4100_SSCR1_RFT;
|
||||
break;
|
||||
default:
|
||||
sccr1_reg &= ~SSCR1_RFT;
|
||||
mask |= SSCR1_RFT;
|
||||
break;
|
||||
}
|
||||
sccr1_reg |= chip->threshold;
|
||||
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
|
||||
|
||||
pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
|
||||
}
|
||||
|
||||
static void int_stop_and_reset(struct driver_data *drv_data)
|
||||
@ -724,11 +729,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
|
||||
|
||||
static void handle_bad_msg(struct driver_data *drv_data)
|
||||
{
|
||||
int_stop_and_reset(drv_data);
|
||||
pxa2xx_spi_off(drv_data);
|
||||
clear_SSCR1_bits(drv_data, drv_data->int_cr1);
|
||||
if (!pxa25x_ssp_comp(drv_data))
|
||||
pxa2xx_spi_write(drv_data, SSTO, 0);
|
||||
write_SSSR_CS(drv_data, drv_data->clear_sr);
|
||||
|
||||
dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
|
||||
}
|
||||
@ -1156,13 +1158,10 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
|
||||
{
|
||||
struct driver_data *drv_data = spi_controller_get_devdata(controller);
|
||||
|
||||
int_stop_and_reset(drv_data);
|
||||
|
||||
/* Disable the SSP */
|
||||
pxa2xx_spi_off(drv_data);
|
||||
/* Clear and disable interrupts and service requests */
|
||||
write_SSSR_CS(drv_data, drv_data->clear_sr);
|
||||
clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
|
||||
if (!pxa25x_ssp_comp(drv_data))
|
||||
pxa2xx_spi_write(drv_data, SSTO, 0);
|
||||
|
||||
/*
|
||||
* Stop the DMA if running. Note DMA callback handler may have unset
|
||||
|
694 drivers/spi/spi-rockchip-sfc.c (Normal file)
@ -0,0 +1,694 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Rockchip Serial Flash Controller Driver
|
||||
*
|
||||
* Copyright (c) 2017-2021, Rockchip Inc.
|
||||
* Author: Shawn Lin <shawn.lin@rock-chips.com>
|
||||
* Chris Morgan <macroalpha82@gmail.com>
|
||||
* Jon Lin <Jon.lin@rock-chips.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
|
||||
/* System control */
|
||||
#define SFC_CTRL 0x0
|
||||
#define SFC_CTRL_PHASE_SEL_NEGETIVE BIT(1)
|
||||
#define SFC_CTRL_CMD_BITS_SHIFT 8
|
||||
#define SFC_CTRL_ADDR_BITS_SHIFT 10
|
||||
#define SFC_CTRL_DATA_BITS_SHIFT 12
|
||||
|
||||
/* Interrupt mask */
|
||||
#define SFC_IMR 0x4
|
||||
#define SFC_IMR_RX_FULL BIT(0)
|
||||
#define SFC_IMR_RX_UFLOW BIT(1)
|
||||
#define SFC_IMR_TX_OFLOW BIT(2)
|
||||
#define SFC_IMR_TX_EMPTY BIT(3)
|
||||
#define SFC_IMR_TRAN_FINISH BIT(4)
|
||||
#define SFC_IMR_BUS_ERR BIT(5)
|
||||
#define SFC_IMR_NSPI_ERR BIT(6)
|
||||
#define SFC_IMR_DMA BIT(7)
|
||||
|
||||
/* Interrupt clear */
|
||||
#define SFC_ICLR 0x8
|
||||
#define SFC_ICLR_RX_FULL BIT(0)
|
||||
#define SFC_ICLR_RX_UFLOW BIT(1)
|
||||
#define SFC_ICLR_TX_OFLOW BIT(2)
|
||||
#define SFC_ICLR_TX_EMPTY BIT(3)
|
||||
#define SFC_ICLR_TRAN_FINISH BIT(4)
|
||||
#define SFC_ICLR_BUS_ERR BIT(5)
|
||||
#define SFC_ICLR_NSPI_ERR BIT(6)
|
||||
#define SFC_ICLR_DMA BIT(7)
|
||||
|
||||
/* FIFO threshold level */
|
||||
#define SFC_FTLR 0xc
|
||||
#define SFC_FTLR_TX_SHIFT 0
|
||||
#define SFC_FTLR_TX_MASK 0x1f
|
||||
#define SFC_FTLR_RX_SHIFT 8
|
||||
#define SFC_FTLR_RX_MASK 0x1f
|
||||
|
||||
/* Reset FSM and FIFO */
|
||||
#define SFC_RCVR 0x10
|
||||
#define SFC_RCVR_RESET BIT(0)
|
||||
|
||||
/* Enhanced mode */
|
||||
#define SFC_AX 0x14
|
||||
|
||||
/* Address Bit number */
|
||||
#define SFC_ABIT 0x18
|
||||
|
||||
/* Interrupt status */
|
||||
#define SFC_ISR 0x1c
|
||||
#define SFC_ISR_RX_FULL_SHIFT BIT(0)
|
||||
#define SFC_ISR_RX_UFLOW_SHIFT BIT(1)
|
||||
#define SFC_ISR_TX_OFLOW_SHIFT BIT(2)
|
||||
#define SFC_ISR_TX_EMPTY_SHIFT BIT(3)
|
||||
#define SFC_ISR_TX_FINISH_SHIFT BIT(4)
|
||||
#define SFC_ISR_BUS_ERR_SHIFT BIT(5)
|
||||
#define SFC_ISR_NSPI_ERR_SHIFT BIT(6)
|
||||
#define SFC_ISR_DMA_SHIFT BIT(7)
|
||||
|
||||
/* FIFO status */
|
||||
#define SFC_FSR 0x20
|
||||
#define SFC_FSR_TX_IS_FULL BIT(0)
|
||||
#define SFC_FSR_TX_IS_EMPTY BIT(1)
|
||||
#define SFC_FSR_RX_IS_EMPTY BIT(2)
|
||||
#define SFC_FSR_RX_IS_FULL BIT(3)
|
||||
#define SFC_FSR_TXLV_MASK GENMASK(12, 8)
|
||||
#define SFC_FSR_TXLV_SHIFT 8
|
||||
#define SFC_FSR_RXLV_MASK GENMASK(20, 16)
|
||||
#define SFC_FSR_RXLV_SHIFT 16
|
||||
|
||||
/* FSM status */
|
||||
#define SFC_SR 0x24
|
||||
#define SFC_SR_IS_IDLE 0x0
|
||||
#define SFC_SR_IS_BUSY 0x1
|
||||
|
||||
/* Raw interrupt status */
|
||||
#define SFC_RISR 0x28
|
||||
#define SFC_RISR_RX_FULL BIT(0)
|
||||
#define SFC_RISR_RX_UNDERFLOW BIT(1)
|
||||
#define SFC_RISR_TX_OVERFLOW BIT(2)
|
||||
#define SFC_RISR_TX_EMPTY BIT(3)
|
||||
#define SFC_RISR_TRAN_FINISH BIT(4)
|
||||
#define SFC_RISR_BUS_ERR BIT(5)
|
||||
#define SFC_RISR_NSPI_ERR BIT(6)
|
||||
#define SFC_RISR_DMA BIT(7)
|
||||
|
||||
/* Version */
|
||||
#define SFC_VER 0x2C
|
||||
#define SFC_VER_3 0x3
|
||||
#define SFC_VER_4 0x4
|
||||
#define SFC_VER_5 0x5
|
||||
|
||||
/* Delay line controller register */
|
||||
#define SFC_DLL_CTRL0 0x3C
|
||||
#define SFC_DLL_CTRL0_SCLK_SMP_DLL BIT(15)
|
||||
#define SFC_DLL_CTRL0_DLL_MAX_VER4 0xFFU
|
||||
#define SFC_DLL_CTRL0_DLL_MAX_VER5 0x1FFU
|
||||
|
||||
/* Master trigger */
|
||||
#define SFC_DMA_TRIGGER 0x80
|
||||
#define SFC_DMA_TRIGGER_START 1
|
||||
|
||||
/* Src or Dst addr for master */
|
||||
#define SFC_DMA_ADDR 0x84
|
||||
|
||||
/* Length control register extension 32GB */
|
||||
#define SFC_LEN_CTRL 0x88
|
||||
#define SFC_LEN_CTRL_TRB_SEL 1
|
||||
#define SFC_LEN_EXT 0x8C
|
||||
|
||||
/* Command */
|
||||
#define SFC_CMD 0x100
|
||||
#define SFC_CMD_IDX_SHIFT 0
|
||||
#define SFC_CMD_DUMMY_SHIFT 8
|
||||
#define SFC_CMD_DIR_SHIFT 12
|
||||
#define SFC_CMD_DIR_RD 0
|
||||
#define SFC_CMD_DIR_WR 1
|
||||
#define SFC_CMD_ADDR_SHIFT 14
|
||||
#define SFC_CMD_ADDR_0BITS 0
|
||||
#define SFC_CMD_ADDR_24BITS 1
|
||||
#define SFC_CMD_ADDR_32BITS 2
|
||||
#define SFC_CMD_ADDR_XBITS 3
|
||||
#define SFC_CMD_TRAN_BYTES_SHIFT 16
|
||||
#define SFC_CMD_CS_SHIFT 30
|
||||
|
||||
/* Address */
|
||||
#define SFC_ADDR 0x104
|
||||
|
||||
/* Data */
|
||||
#define SFC_DATA 0x108
|
||||
|
||||
/* The controller and documentation report that it supports up to 4 CS
 * devices (0-3); however, I have only been able to test a single CS (CS 0)
 * due to the configuration of my device.
 */
|
||||
#define SFC_MAX_CHIPSELECT_NUM 4
|
||||
|
||||
/* The SFC can transfer at most 16KB - 1 bytes at a time;
 * we set it to 15.5KB here for alignment.
 */
|
||||
#define SFC_MAX_IOSIZE_VER3 (512 * 31)
|
||||
|
||||
/* DMA is only enabled for large data transmission */
|
||||
#define SFC_DMA_TRANS_THRETHOLD (0x40)
|
||||
|
||||
/* Maximum clock values from datasheet suggest keeping clock value under
|
||||
* 150MHz. No minimum or average value is suggested.
|
||||
*/
|
||||
#define SFC_MAX_SPEED (150 * 1000 * 1000)
|
||||
|
||||
struct rockchip_sfc {
|
||||
struct device *dev;
|
||||
void __iomem *regbase;
|
||||
struct clk *hclk;
|
||||
struct clk *clk;
|
||||
u32 frequency;
|
||||
/* virtual mapped addr for dma_buffer */
|
||||
void *buffer;
|
||||
dma_addr_t dma_buffer;
|
||||
struct completion cp;
|
||||
bool use_dma;
|
||||
u32 max_iosize;
|
||||
u16 version;
|
||||
};
|
||||
|
||||
static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
|
||||
{
|
||||
int err;
|
||||
u32 status;
|
||||
|
||||
writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
|
||||
|
||||
err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
|
||||
!(status & SFC_RCVR_RESET), 20,
|
||||
jiffies_to_usecs(HZ));
|
||||
if (err)
|
||||
dev_err(sfc->dev, "SFC reset never finished\n");
|
||||
|
||||
/* Still need to clear the masked interrupt from RISR */
|
||||
writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
|
||||
|
||||
dev_dbg(sfc->dev, "reset\n");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
|
||||
{
|
||||
return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
|
||||
}
|
||||
|
||||
static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
|
||||
{
|
||||
return SFC_MAX_IOSIZE_VER3;
|
||||
}
|
||||
|
||||
static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
/* Enable transfer complete interrupt */
|
||||
reg = readl(sfc->regbase + SFC_IMR);
|
||||
reg &= ~mask;
|
||||
writel(reg, sfc->regbase + SFC_IMR);
|
||||
}
|
||||
|
||||
static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
/* Disable transfer finish interrupt */
|
||||
reg = readl(sfc->regbase + SFC_IMR);
|
||||
reg |= mask;
|
||||
writel(reg, sfc->regbase + SFC_IMR);
|
||||
}
|
||||
|
||||
static int rockchip_sfc_init(struct rockchip_sfc *sfc)
|
||||
{
|
||||
writel(0, sfc->regbase + SFC_CTRL);
|
||||
writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
|
||||
rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
|
||||
if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
|
||||
writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
|
||||
{
|
||||
int ret = 0;
|
||||
u32 status;
|
||||
|
||||
ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
|
||||
status & SFC_FSR_TXLV_MASK, 0,
|
||||
timeout_us);
|
||||
if (ret) {
|
||||
dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
|
||||
}
|
||||
|
||||
static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
|
||||
{
|
||||
int ret = 0;
|
||||
u32 status;
|
||||
|
||||
ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
|
||||
status & SFC_FSR_RXLV_MASK, 0,
|
||||
timeout_us);
|
||||
if (ret) {
|
||||
dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
|
||||
}
|
||||
|
||||
static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
|
||||
{
|
||||
if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
|
||||
/*
|
||||
* The SFC does not support outputting DUMMY cycles right after CMD
* cycles, so treat them as ADDR cycles.
|
||||
*/
|
||||
op->addr.nbytes = op->dummy.nbytes;
|
||||
op->addr.buswidth = op->dummy.buswidth;
|
||||
op->addr.val = 0xFFFFFFFFF;
|
||||
|
||||
op->dummy.nbytes = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
|
||||
struct spi_mem *mem,
|
||||
const struct spi_mem_op *op,
|
||||
u32 len)
|
||||
{
|
||||
u32 ctrl = 0, cmd = 0;
|
||||
|
||||
/* set CMD */
|
||||
cmd = op->cmd.opcode;
|
||||
ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
|
||||
|
||||
/* set ADDR */
|
||||
if (op->addr.nbytes) {
|
||||
if (op->addr.nbytes == 4) {
|
||||
cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
|
||||
} else if (op->addr.nbytes == 3) {
|
||||
cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
|
||||
} else {
|
||||
cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
|
||||
writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
|
||||
}
|
||||
|
||||
ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
|
||||
}
|
||||
|
||||
/* set DUMMY */
|
||||
if (op->dummy.nbytes) {
|
||||
if (op->dummy.buswidth == 4)
|
||||
cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
|
||||
else if (op->dummy.buswidth == 2)
|
||||
cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
|
||||
else
|
||||
cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
|
||||
}
|
||||
|
||||
/* set DATA */
|
||||
if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
|
||||
writel(len, sfc->regbase + SFC_LEN_EXT);
|
||||
else
|
||||
cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
|
||||
if (len) {
|
||||
if (op->data.dir == SPI_MEM_DATA_OUT)
|
||||
cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
|
||||
|
||||
ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
|
||||
}
|
||||
if (!len && op->addr.nbytes)
|
||||
cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
|
||||
|
||||
/* set the Controller */
|
||||
ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
|
||||
cmd |= mem->spi->chip_select << SFC_CMD_CS_SHIFT;
|
||||
|
||||
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
|
||||
op->addr.nbytes, op->addr.buswidth,
|
||||
op->dummy.nbytes, op->dummy.buswidth);
|
||||
dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
|
||||
ctrl, cmd, op->addr.val, len);
|
||||
|
||||
writel(ctrl, sfc->regbase + SFC_CTRL);
|
||||
writel(cmd, sfc->regbase + SFC_CMD);
|
||||
if (op->addr.nbytes)
|
||||
writel(op->addr.val, sfc->regbase + SFC_ADDR);
|
||||
|
||||
return 0;
|
||||
}
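The DUMMY handling above reduces to a simple rule: one dummy byte costs 8 bus-clock cycles divided by the dummy-phase bus width, and that cycle count is what is shifted into SFC_CMD_DUMMY_SHIFT. The if/else ladder can be summarized by this sketch (hypothetical helper, not in the driver), valid for the 1-, 2- and 4-bit widths the ladder distinguishes:

/* Sketch only: dummy bytes -> dummy clock cycles, equivalent to the
 * buswidth == 4 / 2 / 1 branches in rockchip_sfc_xfer_setup().
 */
static u32 sfc_dummy_cycles(const struct spi_mem_op *op)
{
	return op->dummy.nbytes * 8 / op->dummy.buswidth;
}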

static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
{
	u8 bytes = len & 0x3;
	u32 dwords;
	int tx_level;
	u32 write_words;
	u32 tmp = 0;

	dwords = len >> 2;
	while (dwords) {
		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
		if (tx_level < 0)
			return tx_level;
		write_words = min_t(u32, tx_level, dwords);
		iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
		buf += write_words << 2;
		dwords -= write_words;
	}

	/* write the rest non word aligned bytes */
	if (bytes) {
		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
		if (tx_level < 0)
			return tx_level;
		memcpy(&tmp, buf, bytes);
		writel(tmp, sfc->regbase + SFC_DATA);
	}

	return len;
}

static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
{
	u8 bytes = len & 0x3;
	u32 dwords;
	u8 read_words;
	int rx_level;
	int tmp;

	/* word aligned access only */
	dwords = len >> 2;
	while (dwords) {
		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
		if (rx_level < 0)
			return rx_level;
		read_words = min_t(u32, rx_level, dwords);
		ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
		buf += read_words << 2;
		dwords -= read_words;
	}

	/* read the rest non word aligned bytes */
	if (bytes) {
		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
		if (rx_level < 0)
			return rx_level;
		tmp = readl(sfc->regbase + SFC_DATA);
		memcpy(buf, &tmp, bytes);
	}

	return len;
}

static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}

static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
				       const struct spi_mem_op *op, u32 len)
{
	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
	else
		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
}

static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
				      const struct spi_mem_op *op, u32 len)
{
	int ret;

	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(sfc->buffer, op->data.buf.out, len);

	ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
	if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
		dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
		ret = -ETIMEDOUT;
	}
	rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
	if (op->data.dir == SPI_MEM_DATA_IN)
		memcpy(op->data.buf.in, sfc->buffer, len);

	return ret;
}

static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
{
	int ret = 0;
	u32 status;

	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
				 !(status & SFC_SR_IS_BUSY),
				 20, timeout_us);
	if (ret) {
		dev_err(sfc->dev, "wait sfc idle timeout\n");
		rockchip_sfc_reset(sfc);

		ret = -EIO;
	}

	return ret;
}

static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
	u32 len = op->data.nbytes;
	int ret;

	if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
		ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
		if (ret)
			return ret;
		sfc->frequency = mem->spi->max_speed_hz;
		dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
			sfc->frequency, clk_get_rate(sfc->clk));
	}

	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			init_completion(&sfc->cp);
			rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			return -EIO;
		}
	}

	return rockchip_sfc_xfer_done(sfc, 100000);
}

static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);

	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);

	return 0;
}

static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.exec_op = rockchip_sfc_exec_mem_op,
	.adjust_op_size = rockchip_sfc_adjust_op_size,
};

static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
{
	struct rockchip_sfc *sfc = dev_id;
	u32 reg;

	reg = readl(sfc->regbase + SFC_RISR);

	/* Clear interrupt */
	writel_relaxed(reg, sfc->regbase + SFC_ICLR);

	if (reg & SFC_RISR_DMA) {
		complete(&sfc->cp);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rockchip_sfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res;
	struct rockchip_sfc *sfc;
	int ret;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
	if (!master)
		return -ENOMEM;

	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->mem_ops = &rockchip_sfc_mem_ops;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
	master->max_speed_hz = SFC_MAX_SPEED;
	master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;

	sfc = spi_master_get_devdata(master);
	sfc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sfc->regbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(sfc->regbase))
		return PTR_ERR(sfc->regbase);

	sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
	if (IS_ERR(sfc->clk)) {
		dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
		return PTR_ERR(sfc->clk);
	}

	sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
	if (IS_ERR(sfc->hclk)) {
		dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
		return PTR_ERR(sfc->hclk);
	}

	sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
					      "rockchip,sfc-no-dma");

	if (sfc->use_dma) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_warn(dev, "Unable to set dma mask\n");
			return ret;
		}

		sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
						  &sfc->dma_buffer,
						  GFP_KERNEL);
		if (!sfc->buffer)
			return -ENOMEM;
	}

	ret = clk_prepare_enable(sfc->hclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable ahb clk\n");
		goto err_hclk;
	}

	ret = clk_prepare_enable(sfc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable interface clk\n");
		goto err_clk;
	}

	/* Find the irq */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "Failed to get the irq\n");
		goto err_irq;
	}

	ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
			       0, pdev->name, sfc);
	if (ret) {
		dev_err(dev, "Failed to request irq\n");

		return ret;
	}

	ret = rockchip_sfc_init(sfc);
	if (ret)
		goto err_irq;

	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
	sfc->version = rockchip_sfc_get_version(sfc);

	ret = spi_register_master(master);
	if (ret)
		goto err_irq;

	return 0;

err_irq:
	clk_disable_unprepare(sfc->clk);
err_clk:
	clk_disable_unprepare(sfc->hclk);
err_hclk:
	return ret;
}

static int rockchip_sfc_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct rockchip_sfc *sfc = platform_get_drvdata(pdev);

	spi_unregister_master(master);

	clk_disable_unprepare(sfc->clk);
	clk_disable_unprepare(sfc->hclk);

	return 0;
}

static const struct of_device_id rockchip_sfc_dt_ids[] = {
	{ .compatible = "rockchip,sfc"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);

static struct platform_driver rockchip_sfc_driver = {
	.driver = {
		.name = "rockchip-sfc",
		.of_match_table = rockchip_sfc_dt_ids,
	},
	.probe = rockchip_sfc_probe,
	.remove = rockchip_sfc_remove,
};
module_platform_driver(rockchip_sfc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");

@ -52,10 +52,20 @@

/*
 * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
 * The slave devices address offset is always 0x8000 and size is 4K.
 * ADI supports 12/14bit address for r2p0, and additional 17bit for r3p0 or
 * later versions. Since bit[1:0] are zero, the spec describes them as
 * 10/12/15bit address modes.
 * The 10bit mode supports a single slave, the 12/15bit modes support 3
 * slaves, and the high two bits carry the slave_id.
 * The slave devices address offset is 0x8000 for 10/12bit address mode,
 * and 0x20000 for 15bit mode.
 */
#define ADI_SLAVE_ADDR_SIZE		SZ_4K
#define ADI_SLAVE_OFFSET		0x8000
#define ADI_10BIT_SLAVE_ADDR_SIZE	SZ_4K
#define ADI_10BIT_SLAVE_OFFSET		0x8000
#define ADI_12BIT_SLAVE_ADDR_SIZE	SZ_16K
#define ADI_12BIT_SLAVE_OFFSET		0x8000
#define ADI_15BIT_SLAVE_ADDR_SIZE	SZ_128K
#define ADI_15BIT_SLAVE_OFFSET		0x20000
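For illustration, a self-contained sketch of how these per-variant window sizes are used later in the file: a register offset is accepted only if it falls inside the variant's slave window (the MMIO target is then the mapped slave base plus that offset). The struct and function names here are stand-ins, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct adi_variant_sketch {
	uint32_t slave_offset;		/* e.g. 0x8000 or 0x20000 */
	uint32_t slave_addr_size;	/* e.g. 4K, 16K or 128K */
};

static int adi_check_reg_sketch(const struct adi_variant_sketch *v, uint32_t reg)
{
	return reg < v->slave_addr_size ? 0 : -1;	/* mirrors reg >= slave_addr_size check */
}

int main(void)
{
	struct adi_variant_sketch v = { 0x20000, 128 * 1024 };	/* 15bit mode */

	printf("0x1ffc  %s\n", adi_check_reg_sketch(&v, 0x1ffc) ? "bad" : "ok");
	printf("0x40000 %s\n", adi_check_reg_sketch(&v, 0x40000) ? "bad" : "ok");
	return 0;
}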

/* Timeout (ms) for the trylock of hardware spinlocks */
#define ADI_HWSPINLOCK_TIMEOUT		5000
@ -67,24 +77,35 @@

#define ADI_FIFO_DRAIN_TIMEOUT		1000
#define ADI_READ_TIMEOUT		2000
#define REG_ADDR_LOW_MASK		GENMASK(11, 0)

/*
 * Read back address from REG_ADI_RD_DATA bit[30:16] which maps to:
 * REG_ADI_RD_CMD bit[14:0] for r2p0
 * REG_ADI_RD_CMD bit[16:2] for r3p0
 */
#define RDBACK_ADDR_MASK_R2		GENMASK(14, 0)
#define RDBACK_ADDR_MASK_R3		GENMASK(16, 2)
#define RDBACK_ADDR_SHIFT_R3		2

/* Registers definitions for PMIC watchdog controller */
#define REG_WDG_LOAD_LOW		0x80
#define REG_WDG_LOAD_HIGH		0x84
#define REG_WDG_CTRL			0x88
#define REG_WDG_LOCK			0xa0
#define REG_WDG_LOAD_LOW		0x0
#define REG_WDG_LOAD_HIGH		0x4
#define REG_WDG_CTRL			0x8
#define REG_WDG_LOCK			0x20

/* Bits definitions for register REG_WDG_CTRL */
#define BIT_WDG_RUN			BIT(1)
#define BIT_WDG_NEW			BIT(2)
#define BIT_WDG_RST			BIT(3)

/* Bits definitions for register REG_MODULE_EN */
#define BIT_WDG_EN			BIT(2)

/* Registers definitions for PMIC */
#define PMIC_RST_STATUS			0xee8
#define PMIC_MODULE_EN			0xc08
#define PMIC_CLK_EN			0xc18
#define BIT_WDG_EN			BIT(2)
#define PMIC_WDG_BASE			0x80

/* Definition of PMIC reset status register */
#define HWRST_STATUS_SECURITY		0x02
@ -103,10 +124,26 @@
#define HWRST_STATUS_WATCHDOG		0xf0

/* Use default timeout 50 ms that converts to watchdog values */
#define WDG_LOAD_VAL			((50 * 1000) / 32768)
#define WDG_LOAD_VAL			((50 * 32768) / 1000)
#define WDG_LOAD_MASK			GENMASK(15, 0)
#define WDG_UNLOCK_KEY			0xe551
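As a quick sanity check of the corrected WDG_LOAD_VAL (illustrative; the 32768 Hz counter rate is inferred from the formula itself): 50 ms corresponds to 50 * 32768 / 1000 = 1638 counts, whereas the old expression truncated to 1.

#include <stdio.h>

int main(void)
{
	unsigned int timeout_ms = 50, wdg_clk_hz = 32768;

	printf("new load = %u\n", timeout_ms * wdg_clk_hz / 1000);	/* 1638 */
	printf("old load = %u\n", timeout_ms * 1000 / wdg_clk_hz);	/* 1 */
	return 0;
}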

struct sprd_adi_wdg {
	u32 base;
	u32 rst_sts;
	u32 wdg_en;
	u32 wdg_clk;
};

struct sprd_adi_data {
	u32 slave_offset;
	u32 slave_addr_size;
	int (*read_check)(u32 val, u32 reg);
	int (*restart)(struct notifier_block *this,
		       unsigned long mode, void *cmd);
	void (*wdg_rst)(void *p);
};

struct sprd_adi {
	struct spi_controller *ctlr;
	struct device *dev;
@ -115,26 +152,21 @@ struct sprd_adi {
	unsigned long slave_vbase;
	unsigned long slave_pbase;
	struct notifier_block restart_handler;
	const struct sprd_adi_data *data;
};

static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
{
	if (paddr < sadi->slave_pbase || paddr >
	    (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
	if (reg >= sadi->data->slave_addr_size) {
		dev_err(sadi->dev,
			"slave physical address is incorrect, addr = 0x%x\n",
			paddr);
			"slave address offset is incorrect, reg = 0x%x\n",
			reg);
		return -EINVAL;
	}

	return 0;
}

static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
{
	return (paddr - sadi->slave_pbase + sadi->slave_vbase);
}

static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
{
	u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
@ -161,11 +193,35 @@ static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
	return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
}

static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
static int sprd_adi_read_check(u32 val, u32 addr)
{
	u32 rd_addr;

	rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;

	if (rd_addr != addr) {
		pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
		return -EIO;
	}

	return 0;
}

static int sprd_adi_read_check_r2(u32 val, u32 reg)
{
	return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
}

static int sprd_adi_read_check_r3(u32 val, u32 reg)
{
	return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
}

static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
{
	int read_timeout = ADI_READ_TIMEOUT;
	unsigned long flags;
	u32 val, rd_addr;
	u32 val;
	int ret = 0;

	if (sadi->hwlock) {
@ -178,11 +234,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
		}
	}

	ret = sprd_adi_check_addr(sadi, reg);
	if (ret)
		goto out;

	/*
	 * Set the physical register address need to read into RD_CMD register,
	 * Set the slave address offset need to read into RD_CMD register,
	 * then ADI controller will start to transfer automatically.
	 */
	writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
	writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);

	/*
	 * Wait read operation complete, the BIT_RD_CMD_BUSY will be set
@ -205,18 +265,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
	}

	/*
	 * The return value includes data and read register address, from bit 0
	 * to bit 15 are data, and from bit 16 to bit 30 are read register
	 * address. Then we can check the returned register address to validate
	 * data.
	 * The return value before adi r5p0 includes data and read register
	 * address, from bit 0 to bit 15 are data, and from bit 16 to bit 30
	 * are read register address. Then we can check the returned register
	 * address to validate data.
	 */
	rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;

	if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
		dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
			reg_paddr, val);
		ret = -EIO;
		goto out;
	if (sadi->data->read_check) {
		ret = sadi->data->read_check(val, reg);
		if (ret < 0)
			goto out;
	}

	*read_val = val & RD_VALUE_MASK;
@ -227,9 +284,8 @@ out:
	return ret;
}

static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
{
	unsigned long reg = sprd_adi_to_vaddr(sadi, reg_paddr);
	u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
	unsigned long flags;
	int ret;
@ -244,6 +300,10 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
		}
	}

	ret = sprd_adi_check_addr(sadi, reg);
	if (ret)
		goto out;

	ret = sprd_adi_drain_fifo(sadi);
	if (ret < 0)
		goto out;
@ -254,7 +314,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
	 */
	do {
		if (!sprd_adi_fifo_is_full(sadi)) {
			writel_relaxed(val, (void __iomem *)reg);
			/* we need the virtual register address to write. */
			writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
			break;
		}

@ -277,60 +338,41 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
				 struct spi_transfer *t)
{
	struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
	u32 phy_reg, val;
	u32 reg, val;
	int ret;

	if (t->rx_buf) {
		phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;

		ret = sprd_adi_check_paddr(sadi, phy_reg);
		if (ret)
			return ret;

		ret = sprd_adi_read(sadi, phy_reg, &val);
		if (ret)
			return ret;

		reg = *(u32 *)t->rx_buf;
		ret = sprd_adi_read(sadi, reg, &val);
		*(u32 *)t->rx_buf = val;
	} else if (t->tx_buf) {
		u32 *p = (u32 *)t->tx_buf;

		/*
		 * Get the physical register address need to write and convert
		 * the physical address to virtual address. Since we need
		 * virtual register address to write.
		 */
		phy_reg = *p++ + sadi->slave_pbase;
		ret = sprd_adi_check_paddr(sadi, phy_reg);
		if (ret)
			return ret;

		reg = *p++;
		val = *p;
		ret = sprd_adi_write(sadi, phy_reg, val);
		if (ret)
			return ret;
		ret = sprd_adi_write(sadi, reg, val);
	} else {
		dev_err(sadi->dev, "no buffer for transfer\n");
		return -EINVAL;
		ret = -EINVAL;
	}

	return 0;
	return ret;
}

static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
static void sprd_adi_set_wdt_rst_mode(void *p)
{
#if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
	u32 val;
	struct sprd_adi *sadi = (struct sprd_adi *)p;

	/* Set default watchdog reboot mode */
	sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
	/* Init watchdog reset mode */
	sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
	val |= HWRST_STATUS_WATCHDOG;
	sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
	sprd_adi_write(sadi, PMIC_RST_STATUS, val);
#endif
}

static int sprd_adi_restart_handler(struct notifier_block *this,
				    unsigned long mode, void *cmd)
static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
			    void *cmd, struct sprd_adi_wdg *wdg)
{
	struct sprd_adi *sadi = container_of(this, struct sprd_adi,
					     restart_handler);
@ -366,40 +408,40 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
		reboot_mode = HWRST_STATUS_NORMAL;

	/* Record the reboot mode */
	sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
	sprd_adi_read(sadi, wdg->rst_sts, &val);
	val &= ~HWRST_STATUS_WATCHDOG;
	val |= reboot_mode;
	sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
	sprd_adi_write(sadi, wdg->rst_sts, val);

	/* Enable the interface clock of the watchdog */
	sprd_adi_read(sadi, sadi->slave_pbase + PMIC_MODULE_EN, &val);
	sprd_adi_read(sadi, wdg->wdg_en, &val);
	val |= BIT_WDG_EN;
	sprd_adi_write(sadi, sadi->slave_pbase + PMIC_MODULE_EN, val);
	sprd_adi_write(sadi, wdg->wdg_en, val);

	/* Enable the work clock of the watchdog */
	sprd_adi_read(sadi, sadi->slave_pbase + PMIC_CLK_EN, &val);
	sprd_adi_read(sadi, wdg->wdg_clk, &val);
	val |= BIT_WDG_EN;
	sprd_adi_write(sadi, sadi->slave_pbase + PMIC_CLK_EN, val);
	sprd_adi_write(sadi, wdg->wdg_clk, val);

	/* Unlock the watchdog */
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
	sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);

	sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
	sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
	val |= BIT_WDG_NEW;
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
	sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);

	/* Load the watchdog timeout value, 50ms is always enough. */
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
	sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
	sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
		       WDG_LOAD_VAL & WDG_LOAD_MASK);

	/* Start the watchdog to reset system */
	sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
	sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
	val |= BIT_WDG_RUN | BIT_WDG_RST;
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
	sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);

	/* Lock the watchdog */
	sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
	sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);

	mdelay(1000);

@ -407,6 +449,19 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
	return NOTIFY_DONE;
}

static int sprd_adi_restart_sc9860(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	struct sprd_adi_wdg wdg = {
		.base = PMIC_WDG_BASE,
		.rst_sts = PMIC_RST_STATUS,
		.wdg_en = PMIC_MODULE_EN,
		.wdg_clk = PMIC_CLK_EN,
	};

	return sprd_adi_restart(this, mode, cmd, &wdg);
}

static void sprd_adi_hw_init(struct sprd_adi *sadi)
{
	struct device_node *np = sadi->dev->of_node;
@ -458,10 +513,11 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
static int sprd_adi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct sprd_adi_data *data;
	struct spi_controller *ctlr;
	struct sprd_adi *sadi;
	struct resource *res;
	u32 num_chipselect;
	u16 num_chipselect;
	int ret;

	if (!np) {
@ -469,6 +525,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
		return -ENODEV;
	}

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(&pdev->dev, "no matching driver data found\n");
		return -EINVAL;
	}

	pdev->id = of_alias_get_id(np, "spi");
	num_chipselect = of_get_child_count(np);

@ -486,10 +548,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
		goto put_ctlr;
	}

	sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
	sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
	sadi->slave_vbase = (unsigned long)sadi->base +
			    data->slave_offset;
	sadi->slave_pbase = res->start + data->slave_offset;
	sadi->ctlr = ctlr;
	sadi->dev = &pdev->dev;
	sadi->data = data;
	ret = of_hwspin_lock_get_id(np, 0);
	if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
		sadi->hwlock =
@ -510,7 +574,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
	}

	sprd_adi_hw_init(sadi);
	sprd_adi_set_wdt_rst_mode(sadi);

	if (sadi->data->wdg_rst)
		sadi->data->wdg_rst(sadi);

	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->bus_num = pdev->id;
@ -525,12 +591,14 @@ static int sprd_adi_probe(struct platform_device *pdev)
		goto put_ctlr;
	}

	sadi->restart_handler.notifier_call = sprd_adi_restart_handler;
	sadi->restart_handler.priority = 128;
	ret = register_restart_handler(&sadi->restart_handler);
	if (ret) {
		dev_err(&pdev->dev, "can not register restart handler\n");
		goto put_ctlr;
	if (sadi->data->restart) {
		sadi->restart_handler.notifier_call = sadi->data->restart;
		sadi->restart_handler.priority = 128;
		ret = register_restart_handler(&sadi->restart_handler);
		if (ret) {
			dev_err(&pdev->dev, "can not register restart handler\n");
			goto put_ctlr;
		}
	}

	return 0;
@ -549,9 +617,38 @@ static int sprd_adi_remove(struct platform_device *pdev)
	return 0;
}

static struct sprd_adi_data sc9860_data = {
	.slave_offset = ADI_10BIT_SLAVE_OFFSET,
	.slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
	.read_check = sprd_adi_read_check_r2,
	.restart = sprd_adi_restart_sc9860,
	.wdg_rst = sprd_adi_set_wdt_rst_mode,
};

static struct sprd_adi_data sc9863_data = {
	.slave_offset = ADI_12BIT_SLAVE_OFFSET,
	.slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
	.read_check = sprd_adi_read_check_r3,
};

static struct sprd_adi_data ums512_data = {
	.slave_offset = ADI_15BIT_SLAVE_OFFSET,
	.slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
	.read_check = sprd_adi_read_check_r3,
};

static const struct of_device_id sprd_adi_of_match[] = {
	{
		.compatible = "sprd,sc9860-adi",
		.data = &sc9860_data,
	},
	{
		.compatible = "sprd,sc9863-adi",
		.data = &sc9863_data,
	},
	{
		.compatible = "sprd,ums512-adi",
		.data = &ums512_data,
	},
	{ },
};

@ -162,6 +162,8 @@
#define SPI_3WIRE_TX		3
#define SPI_3WIRE_RX		4

#define STM32_SPI_AUTOSUSPEND_DELAY	1	/* 1 ms */

/*
 * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
 * without fifo buffers.
@ -568,29 +570,30 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
/**
 * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
 * @spi: pointer to the spi controller data structure
 * @flush: boolean indicating that FIFO should be flushed
 *
 * Write in rx_buf depends on remaining bytes to avoid writing beyond
 * rx_buf end.
 */
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
{
	u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
	u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);

	while ((spi->rx_len > 0) &&
	       ((sr & STM32H7_SPI_SR_RXP) ||
		(flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
		((sr & STM32H7_SPI_SR_EOT) &&
		 ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
		u32 offs = spi->cur_xferlen - spi->rx_len;

		if ((spi->rx_len >= sizeof(u32)) ||
		    (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
		    (sr & STM32H7_SPI_SR_RXWNE)) {
			u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);

			*rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
			spi->rx_len -= sizeof(u32);
		} else if ((spi->rx_len >= sizeof(u16)) ||
			   (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
			   (!(sr & STM32H7_SPI_SR_RXWNE) &&
			    (rxplvl >= 2 || spi->cur_bpw > 8))) {
			u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);

			*rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
@ -606,8 +609,8 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
		rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
	}

	dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
		flush ? "(flush)" : "", spi->rx_len);
	dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
		__func__, spi->rx_len, sr);
}
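A compact standalone sketch of the access-width selection used by the FIFO drain loop above (illustrative only; the real code reads STM32H7_SPI_RXDR and consults the status register): full words are popped while at least four bytes remain, then a half-word, then a final byte.

#include <stdio.h>

static void pick_widths(unsigned int rx_len)
{
	while (rx_len) {
		if (rx_len >= 4) {		/* 32-bit FIFO read */
			rx_len -= 4;
			puts("readl");
		} else if (rx_len >= 2) {	/* 16-bit FIFO read */
			rx_len -= 2;
			puts("readw");
		} else {			/* final byte */
			rx_len -= 1;
			puts("readb");
		}
	}
}

int main(void)
{
	pick_widths(7);	/* readl, readw, readb */
	return 0;
}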

/**
@ -674,18 +677,12 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
 * stm32h7_spi_disable - Disable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
 * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
 * RX-Fifo.
 * Normally, if TSIZE has been configured, we should relax the hardware at the
 * reception of the EOT interrupt. But in case of error, EOT will not be
 * raised. So the subsystem unprepare_message call allows us to properly
 * complete the transfer from a hardware point of view.
 * RX-Fifo is flushed when SPI controller is disabled.
 */
static void stm32h7_spi_disable(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 cr1, sr;
	u32 cr1;

	dev_dbg(spi->dev, "disable controller\n");

@ -698,25 +695,6 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
		return;
	}

	/* Wait on EOT or suspend the flow */
	if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
					      sr, !(sr & STM32H7_SPI_SR_EOT),
					      10, 100000) < 0) {
		if (cr1 & STM32H7_SPI_CR1_CSTART) {
			writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
				       spi->base + STM32H7_SPI_CR1);
			if (readl_relaxed_poll_timeout_atomic(
						spi->base + STM32H7_SPI_SR,
						sr, !(sr & STM32H7_SPI_SR_SUSP),
						10, 100000) < 0)
				dev_warn(spi->dev,
					 "Suspend request timeout\n");
		}
	}

	if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
		stm32h7_spi_read_rxfifo(spi, true);

	if (spi->cur_usedma && spi->dma_tx)
		dmaengine_terminate_all(spi->dma_tx);
	if (spi->cur_usedma && spi->dma_rx)
@ -911,7 +889,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
		if (__ratelimit(&rs))
			dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, false);
			stm32h7_spi_read_rxfifo(spi);
		/*
		 * If communication is suspended while using DMA, it means
		 * that something went wrong, so stop the current transfer
@ -932,8 +910,10 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)

	if (sr & STM32H7_SPI_SR_EOT) {
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, true);
		end = true;
			stm32h7_spi_read_rxfifo(spi);
		if (!spi->cur_usedma ||
		    (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
			end = true;
	}

	if (sr & STM32H7_SPI_SR_TXP)
@ -942,7 +922,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)

	if (sr & STM32H7_SPI_SR_RXP)
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, false);
			stm32h7_spi_read_rxfifo(spi);

	writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);

@ -1041,42 +1021,17 @@ static void stm32f4_spi_dma_tx_cb(void *data)
}

/**
 * stm32f4_spi_dma_rx_cb - dma callback
 * stm32_spi_dma_rx_cb - dma callback
 * @data: pointer to the spi controller data structure
 *
 * DMA callback is called when the transfer is complete for DMA RX channel.
 */
static void stm32f4_spi_dma_rx_cb(void *data)
static void stm32_spi_dma_rx_cb(void *data)
{
	struct stm32_spi *spi = data;

	spi_finalize_current_transfer(spi->master);
	stm32f4_spi_disable(spi);
}

/**
 * stm32h7_spi_dma_cb - dma callback
 * @data: pointer to the spi controller data structure
 *
 * DMA callback is called when the transfer is complete or when an error
 * occurs. If the transfer is complete, EOT flag is raised.
 */
static void stm32h7_spi_dma_cb(void *data)
{
	struct stm32_spi *spi = data;
	unsigned long flags;
	u32 sr;

	spin_lock_irqsave(&spi->lock, flags);

	sr = readl_relaxed(spi->base + STM32H7_SPI_SR);

	spin_unlock_irqrestore(&spi->lock, flags);

	if (!(sr & STM32H7_SPI_SR_EOT))
		dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);

	/* Now wait for EOT, or SUSP or OVR in case of error */
	spi->cfg->disable(spi);
}

/**
@ -1242,11 +1197,13 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
 */
static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
	/* Enable the interrupts relative to the end of transfer */
	stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
						 STM32H7_SPI_IER_TXTFIE |
						 STM32H7_SPI_IER_OVRIE |
						 STM32H7_SPI_IER_MODFIE);
	uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;

	/* Enable the interrupts */
	if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
		ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;

	stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);

	stm32_spi_enable(spi);

@ -1645,10 +1602,6 @@ static int stm32_spi_transfer_one(struct spi_master *master,
	struct stm32_spi *spi = spi_master_get_devdata(master);
	int ret;

	/* Don't do anything on 0 bytes transfers */
	if (transfer->len == 0)
		return 0;

	spi->tx_buf = transfer->tx_buf;
	spi->rx_buf = transfer->rx_buf;
	spi->tx_len = spi->tx_buf ? transfer->len : 0;
@ -1762,7 +1715,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
	.set_mode = stm32f4_spi_set_mode,
	.transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
	.dma_tx_cb = stm32f4_spi_dma_tx_cb,
	.dma_rx_cb = stm32f4_spi_dma_rx_cb,
	.dma_rx_cb = stm32_spi_dma_rx_cb,
	.transfer_one_irq = stm32f4_spi_transfer_one_irq,
	.irq_handler_event = stm32f4_spi_irq_event,
	.irq_handler_thread = stm32f4_spi_irq_thread,
@ -1782,8 +1735,11 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
	.set_data_idleness = stm32h7_spi_data_idleness,
	.set_number_of_data = stm32h7_spi_number_of_data,
	.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
	.dma_rx_cb = stm32h7_spi_dma_cb,
	.dma_tx_cb = stm32h7_spi_dma_cb,
	.dma_rx_cb = stm32_spi_dma_rx_cb,
	/*
	 * dma_tx_cb is not necessary since in case of TX, dma is followed by
	 * SPI access hence handling is performed within the SPI interrupt
	 */
	.transfer_one_irq = stm32h7_spi_transfer_one_irq,
	.irq_handler_thread = stm32h7_spi_irq_thread,
	.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
@ -1927,6 +1883,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
	if (spi->dma_tx || spi->dma_rx)
		master->can_dma = stm32_spi_can_dma;

	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 STM32_SPI_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
@ -1938,6 +1897,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
		goto err_pm_disable;
	}

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "driver initialized\n");

	return 0;

@ -1946,6 +1908,7 @@ err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
err_dma_release:
	if (spi->dma_tx)
		dma_release_channel(spi->dma_tx);
@ -1970,6 +1933,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
	if (master->dma_rx)

@ -717,12 +717,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
	dma_release_channel(dma_chan);
}

static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
				      struct spi_delay *setup,
				      struct spi_delay *hold,
				      struct spi_delay *inactive)
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	struct spi_delay *setup = &spi->cs_setup;
	struct spi_delay *hold = &spi->cs_hold;
	struct spi_delay *inactive = &spi->cs_inactive;
	u8 setup_dly, hold_dly, inactive_dly;
	u32 setup_hold;
	u32 spi_cs_timing;

@ -1061,33 +1061,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
		dev_err(&pdev->dev, "Can not get clock %d\n", ret);
		goto exit_free_master;
	}
	ret = clk_prepare(tspi->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
		goto exit_free_master;
	}
	ret = clk_enable(tspi->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
		goto exit_clk_unprepare;
	}

	spi_irq = platform_get_irq(pdev, 0);
	tspi->irq = spi_irq;
	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
				   tegra_slink_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
			tspi->irq);
		goto exit_clk_disable;
	}

	tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
	if (IS_ERR(tspi->rst)) {
		dev_err(&pdev->dev, "can not get reset\n");
		ret = PTR_ERR(tspi->rst);
		goto exit_free_irq;
		goto exit_free_master;
	}

	tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
@ -1095,7 +1074,7 @@ static int tegra_slink_probe(struct platform_device *pdev)

	ret = tegra_slink_init_dma_param(tspi, true);
	if (ret < 0)
		goto exit_free_irq;
		goto exit_free_master;
	ret = tegra_slink_init_dma_param(tspi, false);
	if (ret < 0)
		goto exit_rx_dma_free;
@ -1106,16 +1085,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
	init_completion(&tspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_slink_runtime_resume(&pdev->dev);
		if (ret)
			goto exit_pm_disable;
	}

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
		pm_runtime_put_noidle(&pdev->dev);
		goto exit_pm_disable;
	}

@ -1123,33 +1095,43 @@ static int tegra_slink_probe(struct platform_device *pdev)
	udelay(2);
	reset_control_deassert(tspi->rst);

	spi_irq = platform_get_irq(pdev, 0);
	tspi->irq = spi_irq;
	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
				   tegra_slink_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
			tspi->irq);
		goto exit_pm_put;
	}

	tspi->def_command_reg = SLINK_M_S;
	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
	pm_runtime_put(&pdev->dev);

	master->dev.of_node = pdev->dev.of_node;
	ret = devm_spi_register_master(&pdev->dev, master);
	ret = spi_register_master(master);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
		goto exit_pm_disable;
		goto exit_free_irq;
	}

	pm_runtime_put(&pdev->dev);

	return ret;

exit_free_irq:
	free_irq(spi_irq, tspi);
exit_pm_put:
	pm_runtime_put(&pdev->dev);
exit_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);

	tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
	tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
	free_irq(spi_irq, tspi);
exit_clk_disable:
	clk_disable(tspi->clk);
exit_clk_unprepare:
	clk_unprepare(tspi->clk);
exit_free_master:
	spi_master_put(master);
	return ret;
@ -1160,10 +1142,11 @@ static int tegra_slink_remove(struct platform_device *pdev)
	struct spi_master *master = platform_get_drvdata(pdev);
	struct tegra_slink_data *tspi = spi_master_get_devdata(master);

	spi_unregister_master(master);

	free_irq(tspi->irq, tspi);

	clk_disable(tspi->clk);
	clk_unprepare(tspi->clk);
	pm_runtime_disable(&pdev->dev);

	if (tspi->tx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, false);
@ -1171,10 +1154,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
	if (tspi->rx_dma_chan)
		tegra_slink_deinit_dma_param(tspi, true);

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_slink_runtime_suspend(&pdev->dev);

	return 0;
}

@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
		if (!wait_for_completion_timeout(&xqspi->data_completion,
							       msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}
@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
		if (!wait_for_completion_timeout(&xqspi->data_completion,
							       msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}
@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
		if (!wait_for_completion_timeout(&xqspi->data_completion,
							       msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;

@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
		if (!wait_for_completion_timeout(&xqspi->data_completion,
							       msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}

@ -846,9 +846,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
			spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
@ -891,7 +891,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (!activate)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}
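With the change above, the per-chip-select delays are read from struct spi_device rather than the controller (see the header hunk below), so a peripheral driver can request them before spi_setup() and the core applies them around every CS toggle. A hedged usage sketch, where the probe function itself is hypothetical and only the spi_delay fields and spi_setup() come from the SPI API:

#include <linux/spi/spi.h>

static int example_client_probe(struct spi_device *spi)
{
	/* 10 us after CS assert, 5 us before CS deassert (illustrative values) */
	spi->cs_setup.value = 10;
	spi->cs_setup.unit = SPI_DELAY_UNIT_USECS;
	spi->cs_hold.value = 5;
	spi->cs_hold.unit = SPI_DELAY_UNIT_USECS;

	return spi_setup(spi);
}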

@ -12,5 +12,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
	u32 sample_sel;
	u32 tick_delay;
};
#endif

@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
 *	not using a GPIO line)
 * @word_delay: delay to be inserted between consecutive
 *	words of a transfer
 *
 * @cs_setup: delay to be introduced by the controller after CS is asserted
 * @cs_hold: delay to be introduced by the controller before CS is deasserted
 * @cs_inactive: delay to be introduced by the controller after CS is
 *	deasserted. If @cs_change_delay is used from @spi_transfer, then the
 *	two delays will be added up.
 * @statistics: statistics for the spi_device
 *
 * A @spi_device is used to interchange data between an SPI slave
@ -188,6 +192,10 @@ struct spi_device {
	int			cs_gpio;	/* LEGACY: chip select gpio */
	struct gpio_desc	*cs_gpiod;	/* chip select gpio desc */
	struct spi_delay	word_delay;	/* inter-word delay */
	/* CS delays */
	struct spi_delay	cs_setup;
	struct spi_delay	cs_hold;
	struct spi_delay	cs_inactive;

	/* the statistics */
	struct spi_statistics	statistics;
@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
 * @max_speed_hz: Highest supported transfer speed
 * @flags: other constraints relevant to this driver
 * @slave: indicates that this is an SPI slave controller
 * @devm_allocated: whether the allocation of this struct is devres-managed
 * @max_transfer_size: function that returns the max transfer size for
 *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
 * @max_message_size: function that returns the max message size for
@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
 *	controller has native support for memory like operations.
 * @unprepare_message: undo any work done by prepare_message().
 * @slave_abort: abort the ongoing transfer request on an SPI slave controller
 * @cs_setup: delay to be introduced by the controller after CS is asserted
 * @cs_hold: delay to be introduced by the controller before CS is deasserted
 * @cs_inactive: delay to be introduced by the controller after CS is
 *	deasserted. If @cs_change_delay is used from @spi_transfer, then the
 *	two delays will be added up.
 * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
 *	CS number. Any individual value may be -ENOENT for CS lines that
 *	are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@ -511,7 +515,7 @@ struct spi_controller {

#define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */

	/* flag indicating this is a non-devres managed controller */
	/* flag indicating if the allocation of this struct is devres-managed */
	bool			devm_allocated;

	/* flag indicating this is an SPI slave controller */
@ -550,8 +554,7 @@ struct spi_controller {
	 * to configure specific CS timing through spi_set_cs_timing() after
	 * spi_setup().
	 */
	int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
			     struct spi_delay *hold, struct spi_delay *inactive);
	int (*set_cs_timing)(struct spi_device *spi);

	/* bidirectional bulk transfers
	 *
@ -638,11 +641,6 @@ struct spi_controller {
	/* Optimized handlers for SPI memory-like operations. */
	const struct spi_controller_mem_ops *mem_ops;

	/* CS delays */
	struct spi_delay	cs_setup;
	struct spi_delay	cs_hold;
	struct spi_delay	cs_inactive;

	/* gpio chip select */
	int			*cs_gpios;
	struct gpio_desc	**cs_gpiods;