Merge tag 'soc_fsl-6.12-2' of https://github.com/chleroy/linux into soc/drivers

- A series from Hervé Codina that brings support for the newer version
of QMC (QUICC Multi-channel Controller) and TSA (Time Slots Assigner)
found on MPC83xx microcontrollers.

- Misc changes to the Freescale qbman drivers, removing a redundant
warning and using iommu_paging_domain_alloc()

* tag 'soc_fsl-6.12-2' of https://github.com/chleroy/linux: (38 commits)
  soc: fsl: qbman: Remove redundant warnings
  soc: fsl: qbman: Use iommu_paging_domain_alloc()
  MAINTAINERS: Add QE files related to the Freescale QMC controller
  soc: fsl: cpm1: qmc: Handle QUICC Engine (QE) soft-qmc firmware
  soc: fsl: cpm1: qmc: Add support for QUICC Engine (QE) implementation
  soc: fsl: qe: Add missing PUSHSCHED command
  soc: fsl: qe: Add resource-managed muram allocators
  soc: fsl: cpm1: qmc: Introduce qmc_version
  soc: fsl: cpm1: qmc: Rename SCC_GSMRL_MODE_QMC
  soc: fsl: cpm1: qmc: Handle RPACK initialization
  soc: fsl: cpm1: qmc: Rename qmc_chan_command()
  soc: fsl: cpm1: qmc: Introduce qmc_{init,exit}_xcc() and their CPM1 version
  soc: fsl: cpm1: qmc: Introduce qmc_init_resource() and its CPM1 version
  soc: fsl: cpm1: qmc: Re-order probe() operations
  soc: fsl: cpm1: qmc: Introduce qmc_data structure
  dt-bindings: soc: fsl: cpm_qe: Add QUICC Engine (QE) QMC controller
  soc: fsl: cpm1: qmc: Add missing spinlock comment
  soc: fsl: cpm1: qmc: Fix 'transmiter' typo
  soc: fsl: cpm1: qmc: Remove unneeded parenthesis
  soc: fsl: cpm1: qmc: Fix blank line and spaces
  ...

Link: https://lore.kernel.org/r/326d9a7d-7674-4c28-aa40-dd2c190244dd@csgroup.eu
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Commit a0e199ecf9 by Arnd Bergmann, 2024-09-03 07:01:30 +00:00
12 changed files with 1549 additions and 325 deletions
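
As background for the series (not part of the merge itself), the QMC driver exposes a small consumer API declared in include/soc/fsl/qe/qmc.h. The sketch below shows roughly how a consumer driver could grab and configure a channel; several of the calls (devm_qmc_chan_get_bychild(), qmc_chan_get_info(), qmc_chan_set_param()) appear in the diffs further down, but the exact signatures, structure fields and the QMC_CHAN_ALL direction flag are assumptions to be checked against qmc.h.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <soc/fsl/qe/qmc.h>

/* Hedged consumer sketch; field and flag names are assumed from qmc.h. */
static int example_qmc_consumer_setup(struct device *dev, struct device_node *np)
{
        struct qmc_chan_param param = { };
        struct qmc_chan_info info;
        struct qmc_chan *chan;
        int ret;

        /* Channel described by a child node of the QMC controller */
        chan = devm_qmc_chan_get_bychild(dev, np);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* Time-slot counts, mode and rates assigned to this channel */
        ret = qmc_chan_get_info(chan, &info);
        if (ret)
                return ret;

        param.mode = QMC_HDLC;                  /* or QMC_TRANSPARENT */
        param.hdlc.max_rx_buf_size = 1024;      /* multiple of 4 and >= 8, see qmc_chan_set_param() */
        ret = qmc_chan_set_param(chan, &param);
        if (ret)
                return ret;

        return qmc_chan_start(chan, QMC_CHAN_ALL);      /* assumed Rx+Tx direction flag */
}

Actual data transfer then goes through the completion-based submit calls such as qmc_chan_read_submit(), which appear in the qmc.c hunks below.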


@ -0,0 +1,210 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-tsa.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: PowerQUICC QE Time-slot assigner (TSA) controller
maintainers:
- Herve Codina <herve.codina@bootlin.com>
description:
The TSA is the time-slot assigner that can be found on some PowerQUICC SoCs.
Its purpose is to route some TDM time-slots to other internal serial
controllers.
properties:
compatible:
items:
- enum:
- fsl,mpc8321-tsa
- const: fsl,qe-tsa
reg:
items:
- description: SI (Serial Interface) register base
- description: SI RAM base
reg-names:
items:
- const: si_regs
- const: si_ram
'#address-cells':
const: 1
'#size-cells':
const: 0
patternProperties:
'^tdm@[0-3]$':
description:
The TDM managed by this controller
type: object
additionalProperties: false
properties:
reg:
minimum: 0
maximum: 3
description:
The TDM number for this TDM, 0 for TDMa, 1 for TDMb, 2 for TDMc and 3
for TDMd.
fsl,common-rxtx-pins:
$ref: /schemas/types.yaml#/definitions/flag
description:
The hardware can use four dedicated pins for Tx clock, Tx sync, Rx
clock and Rx sync or use only two pins, Tx/Rx clock and Tx/Rx sync.
Without the 'fsl,common-rxtx-pins' property, the four pins are used.
With the 'fsl,common-rxtx-pins' property, two pins are used.
clocks:
minItems: 2
items:
- description: Receive sync clock
- description: Receive data clock
- description: Transmit sync clock
- description: Transmit data clock
clock-names:
minItems: 2
items:
- const: rsync
- const: rclk
- const: tsync
- const: tclk
fsl,rx-frame-sync-delay-bits:
enum: [0, 1, 2, 3]
default: 0
description: |
Receive frame sync delay in number of bits.
Indicates the delay between the Rx sync and the first bit of the Rx
frame.
fsl,tx-frame-sync-delay-bits:
enum: [0, 1, 2, 3]
default: 0
description: |
Transmit frame sync delay in number of bits.
Indicates the delay between the Tx sync and the first bit of the Tx
frame.
fsl,clock-falling-edge:
$ref: /schemas/types.yaml#/definitions/flag
description:
Data is sent on falling edge of the clock (and received on the rising
edge). If not present, data is sent on the rising edge (and received
on the falling edge).
fsl,fsync-rising-edge:
$ref: /schemas/types.yaml#/definitions/flag
description:
Frame sync pulses are sampled with the rising edge of the channel
clock. If not present, pulses are sampled with the falling edge.
fsl,fsync-active-low:
$ref: /schemas/types.yaml#/definitions/flag
description:
Frame sync signals are active on low logic level.
If not present, sync signals are active on high level.
fsl,double-speed-clock:
$ref: /schemas/types.yaml#/definitions/flag
description:
The channel clock is twice the data rate.
patternProperties:
'^fsl,[rt]x-ts-routes$':
$ref: /schemas/types.yaml#/definitions/uint32-matrix
description: |
A list of tuples that indicates the Tx or Rx time-slot routes.
items:
items:
- description:
The number of time-slots
minimum: 1
maximum: 64
- description: |
The source (Tx) or destination (Rx) serial interface
(dt-bindings/soc/qe-fsl,tsa.h defines these values)
- 0: No destination
- 1: UCC1
- 2: UCC2
- 3: UCC3
- 4: UCC4
- 5: UCC5
enum: [0, 1, 2, 3, 4, 5]
minItems: 1
maxItems: 64
allOf:
# If fsl,common-rxtx-pins is present, only 2 clocks are needed.
# Else, the 4 clocks must be present.
- if:
required:
- fsl,common-rxtx-pins
then:
properties:
clocks:
maxItems: 2
clock-names:
maxItems: 2
else:
properties:
clocks:
minItems: 4
clock-names:
minItems: 4
required:
- reg
- clocks
- clock-names
required:
- compatible
- reg
- reg-names
- '#address-cells'
- '#size-cells'
additionalProperties: false
examples:
- |
#include <dt-bindings/soc/qe-fsl,tsa.h>
tsa@ae0 {
compatible = "fsl,mpc8321-tsa", "fsl,qe-tsa";
reg = <0xae0 0x10>,
<0xc00 0x200>;
reg-names = "si_regs", "si_ram";
#address-cells = <1>;
#size-cells = <0>;
tdm@0 {
/* TDMa */
reg = <0>;
clocks = <&clk_l1rsynca>, <&clk_l1rclka>;
clock-names = "rsync", "rclk";
fsl,common-rxtx-pins;
fsl,fsync-rising-edge;
fsl,tx-ts-routes = <2 0>, /* TS 0..1 */
<24 FSL_QE_TSA_UCC4>, /* TS 2..25 */
<1 0>, /* TS 26 */
<5 FSL_QE_TSA_UCC3>; /* TS 27..31 */
fsl,rx-ts-routes = <2 0>, /* TS 0..1 */
<24 FSL_QE_TSA_UCC4>, /* 2..25 */
<1 0>, /* TS 26 */
<5 FSL_QE_TSA_UCC3>; /* TS 27..31 */
};
};


@ -0,0 +1,197 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: PowerQUICC QE QUICC Multichannel Controller (QMC)
maintainers:
- Herve Codina <herve.codina@bootlin.com>
description:
The QMC (QUICC Multichannel Controller) emulates up to 64 channels within one
serial controller using the same TDM physical interface routed from TSA.
properties:
compatible:
items:
- enum:
- fsl,mpc8321-ucc-qmc
- const: fsl,qe-ucc-qmc
reg:
items:
- description: UCC (Unified communication controller) register base
- description: Dual port ram base
reg-names:
items:
- const: ucc_regs
- const: dpram
interrupts:
maxItems: 1
description: UCC interrupt line in the QE interrupt controller
fsl,tsa-serial:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- items:
- description: phandle to TSA node
- enum: [1, 2, 3, 4, 5]
description: |
TSA serial interface (dt-bindings/soc/qe-fsl,tsa.h defines these
values)
- 1: UCC1
- 2: UCC2
- 3: UCC3
- 4: UCC4
- 5: UCC5
description:
Should be a phandle/number pair. The phandle to TSA node and the TSA
serial interface to use.
fsl,soft-qmc:
$ref: /schemas/types.yaml#/definitions/string
description:
Soft QMC firmware name to load. If this property is omitted, no firmware
is used.
'#address-cells':
const: 1
'#size-cells':
const: 0
patternProperties:
'^channel@([0-9]|[1-5][0-9]|6[0-3])$':
description:
A channel managed by this controller
type: object
additionalProperties: false
properties:
compatible:
items:
- enum:
- fsl,mpc8321-ucc-qmc-hdlc
- const: fsl,qe-ucc-qmc-hdlc
- const: fsl,qmc-hdlc
reg:
minimum: 0
maximum: 63
description:
The channel number
fsl,operational-mode:
$ref: /schemas/types.yaml#/definitions/string
enum: [transparent, hdlc]
default: transparent
description: |
The channel operational mode
- hdlc: The channel handles HDLC frames
- transparent: The channel handles raw data without any processing
fsl,reverse-data:
$ref: /schemas/types.yaml#/definitions/flag
description:
The bit order as seen on the channels is reversed,
transmitting/receiving the MSB of each octet first.
This flag is used only in 'transparent' mode.
fsl,tx-ts-mask:
$ref: /schemas/types.yaml#/definitions/uint64
description:
Channel assigned Tx time-slots within the Tx time-slots routed by the
TSA to this cell.
fsl,rx-ts-mask:
$ref: /schemas/types.yaml#/definitions/uint64
description:
Channel assigned Rx time-slots within the Rx time-slots routed by the
TSA to this cell.
fsl,framer:
$ref: /schemas/types.yaml#/definitions/phandle
description:
phandle to the framer node. The framer is in charge of an E1/T1 line
interface connected to the TDM bus. It can be used to get the E1/T1 line
status such as link up/down.
allOf:
- if:
properties:
compatible:
not:
contains:
const: fsl,qmc-hdlc
then:
properties:
fsl,framer: false
required:
- reg
- fsl,tx-ts-mask
- fsl,rx-ts-mask
required:
- compatible
- reg
- reg-names
- interrupts
- fsl,tsa-serial
- '#address-cells'
- '#size-cells'
additionalProperties: false
examples:
- |
#include <dt-bindings/soc/qe-fsl,tsa.h>
qmc@a60 {
compatible = "fsl,mpc8321-ucc-qmc", "fsl,qe-ucc-qmc";
reg = <0x3200 0x200>,
<0x10000 0x1000>;
reg-names = "ucc_regs", "dpram";
interrupts = <35>;
interrupt-parent = <&qeic>;
fsl,soft-qmc = "fsl_qe_ucode_qmc_8321_11.bin";
#address-cells = <1>;
#size-cells = <0>;
fsl,tsa-serial = <&tsa FSL_QE_TSA_UCC4>;
channel@16 {
/* Ch16 : First 4 even TS from all routed from TSA */
reg = <16>;
fsl,operational-mode = "transparent";
fsl,reverse-data;
fsl,tx-ts-mask = <0x00000000 0x000000aa>;
fsl,rx-ts-mask = <0x00000000 0x000000aa>;
};
channel@17 {
/* Ch17 : First 4 odd TS from all routed from TSA */
reg = <17>;
fsl,operational-mode = "transparent";
fsl,reverse-data;
fsl,tx-ts-mask = <0x00000000 0x00000055>;
fsl,rx-ts-mask = <0x00000000 0x00000055>;
};
channel@19 {
/* Ch19 : 8 TS (TS 8..15) from all routed from TSA */
compatible = "fsl,mpc8321-ucc-qmc-hdlc",
"fsl,qe-ucc-qmc-hdlc",
"fsl,qmc-hdlc";
reg = <19>;
fsl,operational-mode = "hdlc";
fsl,tx-ts-mask = <0x00000000 0x0000ff00>;
fsl,rx-ts-mask = <0x00000000 0x0000ff00>;
fsl,framer = <&framer>;
};
};


@ -8996,6 +8996,7 @@ M: Herve Codina <herve.codina@bootlin.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-scc-qmc.yaml
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-ucc-qmc.yaml
F: drivers/soc/fsl/qe/qmc.c
F: include/soc/fsl/qe/qmc.h
@ -9011,9 +9012,11 @@ M: Herve Codina <herve.codina@bootlin.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,cpm1-tsa.yaml
F: Documentation/devicetree/bindings/soc/fsl/cpm_qe/fsl,qe-tsa.yaml
F: drivers/soc/fsl/qe/tsa.c
F: drivers/soc/fsl/qe/tsa.h
F: include/dt-bindings/soc/cpm1-fsl,tsa.h
F: include/dt-bindings/soc/qe-fsl,tsa.h
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
L: netdev@vger.kernel.org


@ -791,8 +791,6 @@ static int fsl_qman_probe(struct platform_device *pdev)
* FQD memory MUST be zero'd by software
*/
zero_priv_mem(fqd_a, fqd_sz);
#else
WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);


@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
struct device *dev = pcfg->dev;
int ret;
pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
if (!pcfg->iommu_domain) {
pcfg->iommu_domain = iommu_paging_domain_alloc(dev);
if (IS_ERR(pcfg->iommu_domain)) {
dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
pcfg->iommu_domain = NULL;
goto no_iommu;
}
ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
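
A side note on the qbman portal change above (not part of the patch): iommu_domain_alloc() returned NULL on failure, while iommu_paging_domain_alloc() returns an ERR_PTR() value, which is why the check switches from !pcfg->iommu_domain to IS_ERR(). A minimal sketch of that pattern, with a hypothetical caller name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical helper showing the new allocation and error-handling style. */
static struct iommu_domain *example_alloc_paging_domain(struct device *dev)
{
        struct iommu_domain *domain;

        domain = iommu_paging_domain_alloc(dev);        /* ERR_PTR() on failure, never NULL */
        if (IS_ERR(domain))
                return NULL;    /* qman_portal.c keeps a NULL sentinel and falls back to no IOMMU */

        return domain;
}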


@ -17,7 +17,7 @@ config QUICC_ENGINE
config UCC_SLOW
bool
default y if SERIAL_QE
default y if SERIAL_QE || (CPM_QMC && QUICC_ENGINE)
help
This option provides qe_lib support to UCC slow
protocols: UART, BISYNC, QMC
@ -31,26 +31,28 @@ config UCC_FAST
config UCC
bool
default y if UCC_FAST || UCC_SLOW
default y if UCC_FAST || UCC_SLOW || (CPM_TSA && QUICC_ENGINE)
config CPM_TSA
tristate "CPM TSA support"
tristate "CPM/QE TSA support"
depends on OF && HAS_IOMEM
depends on CPM1 || (CPM && COMPILE_TEST)
depends on CPM1 || QUICC_ENGINE || \
((CPM || QUICC_ENGINE) && COMPILE_TEST)
help
Freescale CPM Time Slot Assigner (TSA)
Freescale CPM/QE Time Slot Assigner (TSA)
controller.
This option enables support for this
controller
config CPM_QMC
tristate "CPM QMC support"
tristate "CPM/QE QMC support"
depends on OF && HAS_IOMEM
depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
depends on CPM1 || QUICC_ENGINE || \
(FSL_SOC && (CPM || QUICC_ENGINE) && COMPILE_TEST)
depends on CPM_TSA
help
Freescale CPM QUICC Multichannel Controller
Freescale CPM/QE QUICC Multichannel Controller
(QMC)
This option enables support for this


@ -13,6 +13,7 @@
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*/
#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
@ -187,6 +188,49 @@ void cpm_muram_free(s32 offset)
}
EXPORT_SYMBOL(cpm_muram_free);
static void devm_cpm_muram_release(struct device *dev, void *res)
{
s32 *info = res;
cpm_muram_free(*info);
}
/**
* devm_cpm_muram_alloc - Resource-managed cpm_muram_alloc
* @dev: Device to allocate memory for
* @size: number of bytes to allocate
* @align: requested alignment, in bytes
*
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure as cpm_muram_alloc() does.
* Use cpm_muram_addr() to get the virtual address of the area.
*
* Compare against cpm_muram_alloc(), the memory allocated by this
* resource-managed version is automatically freed on driver detach and so,
* cpm_muram_free() must not be called to release the allocated memory.
*/
s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
unsigned long align)
{
s32 info;
s32 *dr;
dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
info = cpm_muram_alloc(size, align);
if (info >= 0) {
*dr = info;
devres_add(dev, dr);
} else {
devres_free(dr);
}
return info;
}
EXPORT_SYMBOL(devm_cpm_muram_alloc);
/*
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
@ -211,6 +255,42 @@ s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
/**
* devm_cpm_muram_alloc_fixed - Resource-managed cpm_muram_alloc_fixed
* @dev: Device to allocate memory for
* @offset: offset of allocation start address
* @size: number of bytes to allocate
*
* This function returns a non-negative offset into the muram area, or
* a negative errno on failure as cpm_muram_alloc_fixed() does.
* Use cpm_muram_addr() to get the virtual address of the area.
*
* Compare against cpm_muram_alloc_fixed(), the memory allocated by this
* resource-managed version is automatically freed on driver detach and so,
* cpm_muram_free() must not be called to release the allocated memory.
*/
s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
unsigned long size)
{
s32 info;
s32 *dr;
dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return -ENOMEM;
info = cpm_muram_alloc_fixed(offset, size);
if (info >= 0) {
*dr = info;
devres_add(dev, dr);
} else {
devres_free(dr);
}
return info;
}
EXPORT_SYMBOL(devm_cpm_muram_alloc_fixed);
/**
* cpm_muram_addr - turn a muram offset into a virtual address
* @offset: muram offset to convert
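
The resource-managed muram allocators added above are described by their kernel-doc; a short hypothetical usage sketch follows (the helper name is invented, and the include paths are assumed rather than taken from the patch):

#include <linux/device.h>
#include <linux/types.h>
#include <soc/fsl/cpm.h>        /* assumed location of the cpm_muram_* declarations */

/* Allocate a small muram area whose lifetime is tied to the device. */
static int example_muram_setup(struct device *dev, void __iomem **base)
{
        s32 offset;

        offset = devm_cpm_muram_alloc(dev, 64, 8);      /* 64 bytes, 8-byte aligned */
        if (offset < 0)
                return offset;                          /* negative errno, as with cpm_muram_alloc() */

        *base = cpm_muram_addr(offset);                 /* virtual address of the allocated area */

        /* No cpm_muram_free() here: the devm_ variant releases the area on driver detach. */
        return 0;
}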


@ -8,7 +8,9 @@
*/
#include <soc/fsl/qe/qmc.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@ -18,31 +20,41 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <soc/fsl/qe/ucc_slow.h>
#include <soc/fsl/qe/qe.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"
/* SCC general mode register high (32 bits) */
/* SCC general mode register low (32 bits) (GUMR_L in QE) */
#define SCC_GSMRL 0x00
#define SCC_GSMRL_ENR (1 << 5)
#define SCC_GSMRL_ENT (1 << 4)
#define SCC_GSMRL_MODE_QMC (0x0A << 0)
#define SCC_GSMRL_ENR BIT(5)
#define SCC_GSMRL_ENT BIT(4)
#define SCC_GSMRL_MODE_MASK GENMASK(3, 0)
#define SCC_CPM1_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
#define SCC_QE_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)
/* SCC general mode register low (32 bits) */
/* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
#define SCC_GSMRH 0x04
#define SCC_GSMRH_CTSS (1 << 7)
#define SCC_GSMRH_CDS (1 << 8)
#define SCC_GSMRH_CTSP (1 << 9)
#define SCC_GSMRH_CDP (1 << 10)
#define SCC_GSMRH_CTSS BIT(7)
#define SCC_GSMRH_CDS BIT(8)
#define SCC_GSMRH_CTSP BIT(9)
#define SCC_GSMRH_CDP BIT(10)
#define SCC_GSMRH_TTX BIT(11)
#define SCC_GSMRH_TRX BIT(12)
/* SCC event register (16 bits) */
/* SCC event register (16 bits) (identical to UCCE in QE) */
#define SCC_SCCE 0x10
#define SCC_SCCE_IQOV (1 << 3)
#define SCC_SCCE_GINT (1 << 2)
#define SCC_SCCE_GUN (1 << 1)
#define SCC_SCCE_GOV (1 << 0)
#define SCC_SCCE_IQOV BIT(3)
#define SCC_SCCE_GINT BIT(2)
#define SCC_SCCE_GUN BIT(1)
#define SCC_SCCE_GOV BIT(0)
/* SCC mask register (16 bits) */
#define SCC_SCCM 0x14
/* UCC Extended Mode Register (8 bits, QE only) */
#define SCC_QE_UCC_GUEMR 0x90
/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE 0x00
/* Multichannel controller state (16 bits) */
@ -73,27 +85,42 @@
#define QMC_GBL_TSATTX 0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16 0xA0
/* Rx framer base pointer (16 bits, QE only) */
#define QMC_QE_GBL_RX_FRM_BASE 0xAC
/* Tx framer base pointer (16 bits, QE only) */
#define QMC_QE_GBL_TX_FRM_BASE 0xAE
/* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
#define QMC_QE_GBL_RSV_B0_START 0xB0
#define QMC_QE_GBL_RSV_B0_SIZE 0x14
/* QMC Global Channel specific base (32 bits, QE only) */
#define QMC_QE_GBL_GCSBASE 0xC4
/* TSA entry (16bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID (1 << 15)
#define QMC_TSA_WRAP (1 << 14)
#define QMC_TSA_MASK (0x303F)
#define QMC_TSA_CHANNEL(x) ((x) << 6)
#define QMC_TSA_VALID BIT(15)
#define QMC_TSA_WRAP BIT(14)
#define QMC_TSA_MASK_MASKH GENMASK(13, 12)
#define QMC_TSA_MASK_MASKL GENMASK(5, 0)
#define QMC_TSA_MASK_8BIT (FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
#define QMC_TSA_CHANNEL_MASK GENMASK(11, 6)
#define QMC_TSA_CHANNEL(x) FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE 0x00
/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR 0x02
#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT (1 << 12)
#define QMC_SPE_CHAMR_POL (1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10)
#define QMC_SPE_CHAMR_MODE_MASK GENMASK(15, 15)
#define QMC_SPE_CHAMR_MODE_HDLC FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
#define QMC_SPE_CHAMR_MODE_TRANSP (FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
#define QMC_SPE_CHAMR_ENT BIT(12)
#define QMC_SPE_CHAMR_POL BIT(8)
#define QMC_SPE_CHAMR_HDLC_IDLM BIT(13)
#define QMC_SPE_CHAMR_HDLC_CRC BIT(7)
#define QMC_SPE_CHAMR_HDLC_NOF_MASK GENMASK(3, 0)
#define QMC_SPE_CHAMR_HDLC_NOF(x) FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
#define QMC_SPE_CHAMR_TRANSP_RD BIT(14)
#define QMC_SPE_CHAMR_TRANSP_SYNC BIT(10)
/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE 0x04
@ -120,43 +147,47 @@
/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC 0x3C
#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0)
#define QMC_SPE_TRNSYNC_RX_MASK GENMASK(15, 8)
#define QMC_SPE_TRNSYNC_RX(x) FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
#define QMC_SPE_TRNSYNC_TX_MASK GENMASK(7, 0)
#define QMC_SPE_TRNSYNC_TX(x) FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)
/* Interrupt related registers bits */
#define QMC_INT_V (1 << 15)
#define QMC_INT_W (1 << 14)
#define QMC_INT_NID (1 << 13)
#define QMC_INT_IDL (1 << 12)
#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF (1 << 5)
#define QMC_INT_UN (1 << 4)
#define QMC_INT_RXF (1 << 3)
#define QMC_INT_BSY (1 << 2)
#define QMC_INT_TXB (1 << 1)
#define QMC_INT_RXB (1 << 0)
#define QMC_INT_V BIT(15)
#define QMC_INT_W BIT(14)
#define QMC_INT_NID BIT(13)
#define QMC_INT_IDL BIT(12)
#define QMC_INT_CHANNEL_MASK GENMASK(11, 6)
#define QMC_INT_GET_CHANNEL(x) FIELD_GET(QMC_INT_CHANNEL_MASK, x)
#define QMC_INT_MRF BIT(5)
#define QMC_INT_UN BIT(4)
#define QMC_INT_RXF BIT(3)
#define QMC_INT_BSY BIT(2)
#define QMC_INT_TXB BIT(1)
#define QMC_INT_RXB BIT(0)
/* BD related registers bits */
#define QMC_BD_RX_E (1 << 15)
#define QMC_BD_RX_W (1 << 13)
#define QMC_BD_RX_I (1 << 12)
#define QMC_BD_RX_L (1 << 11)
#define QMC_BD_RX_F (1 << 10)
#define QMC_BD_RX_CM (1 << 9)
#define QMC_BD_RX_UB (1 << 7)
#define QMC_BD_RX_LG (1 << 5)
#define QMC_BD_RX_NO (1 << 4)
#define QMC_BD_RX_AB (1 << 3)
#define QMC_BD_RX_CR (1 << 2)
#define QMC_BD_RX_E BIT(15)
#define QMC_BD_RX_W BIT(13)
#define QMC_BD_RX_I BIT(12)
#define QMC_BD_RX_L BIT(11)
#define QMC_BD_RX_F BIT(10)
#define QMC_BD_RX_CM BIT(9)
#define QMC_BD_RX_UB BIT(7)
#define QMC_BD_RX_LG BIT(5)
#define QMC_BD_RX_NO BIT(4)
#define QMC_BD_RX_AB BIT(3)
#define QMC_BD_RX_CR BIT(2)
#define QMC_BD_TX_R (1 << 15)
#define QMC_BD_TX_W (1 << 13)
#define QMC_BD_TX_I (1 << 12)
#define QMC_BD_TX_L (1 << 11)
#define QMC_BD_TX_TC (1 << 10)
#define QMC_BD_TX_CM (1 << 9)
#define QMC_BD_TX_UB (1 << 7)
#define QMC_BD_TX_PAD (0x0f << 0)
#define QMC_BD_TX_R BIT(15)
#define QMC_BD_TX_W BIT(13)
#define QMC_BD_TX_I BIT(12)
#define QMC_BD_TX_L BIT(11)
#define QMC_BD_TX_TC BIT(10)
#define QMC_BD_TX_CM BIT(9)
#define QMC_BD_TX_UB BIT(7)
#define QMC_BD_TX_PAD_MASK GENMASK(3, 0)
#define QMC_BD_TX_PAD(x) FIELD_PREP(QMC_BD_TX_PAD_MASK, x)
/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS 8
@ -184,7 +215,7 @@ struct qmc_chan {
u64 rx_ts_mask;
bool is_reverse_data;
spinlock_t tx_lock;
spinlock_t tx_lock; /* Protect Tx related data */
cbd_t __iomem *txbds;
cbd_t __iomem *txbd_free;
cbd_t __iomem *txbd_done;
@ -192,7 +223,7 @@ struct qmc_chan {
u64 nb_tx_underrun;
bool is_tx_stopped;
spinlock_t rx_lock;
spinlock_t rx_lock; /* Protect Rx related data */
cbd_t __iomem *rxbds;
cbd_t __iomem *rxbd_free;
cbd_t __iomem *rxbd_done;
@ -203,13 +234,31 @@ struct qmc_chan {
bool is_rx_stopped;
};
enum qmc_version {
QMC_CPM1,
QMC_QE,
};
struct qmc_data {
enum qmc_version version;
u32 tstate; /* Initial TSTATE value */
u32 rstate; /* Initial RSTATE value */
u32 zistate; /* Initial ZISTATE value */
u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */
u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */
u32 rpack; /* Initial RPACK value */
};
struct qmc {
struct device *dev;
const struct qmc_data *data;
struct tsa_serial *tsa_serial;
void __iomem *scc_regs;
void __iomem *scc_pram;
void __iomem *dpram;
u16 scc_pram_offset;
u32 dpram_offset;
u32 qe_subblock;
cbd_t __iomem *bd_table;
dma_addr_t bd_dma_addr;
size_t bd_size;
@ -222,6 +271,11 @@ struct qmc {
struct qmc_chan *chans[64];
};
static void qmc_write8(void __iomem *addr, u8 val)
{
iowrite8(val, addr);
}
static void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
@ -262,6 +316,13 @@ static void qmc_setbits32(void __iomem *addr, u32 set)
qmc_write32(addr, qmc_read32(addr) | set);
}
static bool qmc_is_qe(const struct qmc *qmc)
{
if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
return qmc->data->version == QMC_QE;
return IS_ENABLED(CONFIG_QUICC_ENGINE);
}
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
@ -348,8 +409,8 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param
switch (param->mode) {
case QMC_HDLC:
if ((param->hdlc.max_rx_buf_size % 4) ||
(param->hdlc.max_rx_buf_size < 8))
if (param->hdlc.max_rx_buf_size % 4 ||
param->hdlc.max_rx_buf_size < 8)
return -EINVAL;
qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
@ -532,11 +593,12 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Restart receiver if needed */
if (chan->is_rx_halted && !chan->is_rx_stopped) {
/* Restart receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
else
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
chan->mode == QMC_TRANSPARENT ?
chan->qmc->data->zdstate_transp :
chan->qmc->data->zdstate_hdlc);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
}
chan->rx_pending++;
@ -641,7 +703,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser
return -EINVAL;
}
val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Rx stuff*/
for (i = 0; i < info->nb_rx_ts; i++) {
@ -662,7 +724,7 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
~QMC_TSA_WRAP, enable ? val : 0x0000);
(u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@ -677,7 +739,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria
/* Use a Rx 32 entries table */
val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Rx stuff */
for (i = 0; i < info->nb_rx_ts; i++) {
@ -698,7 +760,7 @@ static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_seria
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
~QMC_TSA_WRAP, enable ? val : 0x0000);
(u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@ -713,7 +775,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
/* Use a Tx 32 entries table */
val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
/* Check entries based on Tx stuff */
for (i = 0; i < info->nb_tx_ts; i++) {
@ -734,7 +796,7 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
continue;
qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
~QMC_TSA_WRAP, enable ? val : 0x0000);
(u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
}
return 0;
@ -774,11 +836,18 @@ static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
return qmc_chan_setup_tsa_32rx(chan, &info, enable);
}
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}
static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
{
if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0))
return -EIO;
return 0;
}
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
unsigned long flags;
@ -793,7 +862,9 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
}
/* Send STOP RECEIVE command */
ret = qmc_chan_command(chan, 0x0);
ret = qmc_is_qe(chan->qmc) ?
qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
qmc_chan_cpm1_command(chan, 0x0);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
chan->id, ret);
@ -830,7 +901,9 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
}
/* Send STOP TRANSMIT command */
ret = qmc_chan_command(chan, 0x1);
ret = qmc_is_qe(chan->qmc) ?
qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
qmc_chan_cpm1_command(chan, 0x1);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
chan->id, ret);
@ -889,6 +962,7 @@ EXPORT_SYMBOL(qmc_chan_stop);
static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
struct tsa_serial_info info;
unsigned int w_rx, w_tx;
u16 first_rx, last_tx;
u16 trnsync;
int ret;
@ -898,6 +972,14 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
if (ret)
return ret;
w_rx = hweight64(chan->rx_ts_mask);
w_tx = hweight64(chan->tx_ts_mask);
if (w_rx <= 1 && w_tx <= 1) {
dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
return 0;
}
/* Find the first Rx TS allocated to the channel */
first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
@ -911,6 +993,7 @@ static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
chan->id, trnsync,
@ -940,19 +1023,22 @@ static int qmc_chan_start_rx(struct qmc_chan *chan)
goto end;
}
if (chan->mode == QMC_TRANSPARENT) {
ret = qmc_setup_chan_trnsync(chan->qmc, chan);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
chan->id, ret);
goto end;
}
}
/* Restart the receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
else
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
chan->mode == QMC_TRANSPARENT ?
chan->qmc->data->zdstate_transp :
chan->qmc->data->zdstate_hdlc);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
chan->is_rx_halted = false;
chan->is_rx_stopped = false;
@ -982,12 +1068,14 @@ static int qmc_chan_start_tx(struct qmc_chan *chan)
goto end;
}
if (chan->mode == QMC_TRANSPARENT) {
ret = qmc_setup_chan_trnsync(chan->qmc, chan);
if (ret) {
dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
chan->id, ret);
goto end;
}
}
/*
* Enable channel transmitter as it could be disabled if
@ -1096,8 +1184,8 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
qmc_read16(chan->s_param + QMC_SPE_TBASE));
/* Reset TSTATE and ZISTATE to their initial value */
qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
spin_unlock_irqrestore(&chan->tx_lock, flags);
}
@ -1127,7 +1215,7 @@ static int qmc_check_chans(struct qmc *qmc)
if (ret)
return ret;
if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
return -EINVAL;
}
@ -1136,7 +1224,7 @@ static int qmc_check_chans(struct qmc *qmc)
* If more than 32 TS are assigned to this serial, one common table is
* used for Tx and Rx and so masks must be equal for all channels.
*/
if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
if (info.nb_tx_ts != info.nb_rx_ts) {
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
@ -1368,13 +1456,14 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
if (chan->mode == QMC_TRANSPARENT) {
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
val = QMC_SPE_CHAMR_MODE_TRANSP;
if (chan->is_reverse_data)
val |= QMC_SPE_CHAMR_TRANSP_RD;
qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
@ -1382,7 +1471,7 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
if (ret)
return ret;
} else {
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
qmc_write16(chan->s_param + QMC_SPE_CHAMR,
QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
@ -1510,11 +1599,14 @@ static void qmc_irq_gint(struct qmc *qmc)
/* Restart the receiver if needed */
spin_lock_irqsave(&chan->rx_lock, flags);
if (chan->rx_pending && !chan->is_rx_stopped) {
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
else
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
qmc_write32(chan->s_param + QMC_SPE_RPACK,
chan->qmc->data->rpack);
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
chan->mode == QMC_TRANSPARENT ?
chan->qmc->data->zdstate_transp :
chan->qmc->data->zdstate_hdlc);
qmc_write32(chan->s_param + QMC_SPE_RSTATE,
chan->qmc->data->rstate);
chan->is_rx_halted = false;
} else {
chan->is_rx_halted = true;
@ -1558,27 +1650,74 @@ static irqreturn_t qmc_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
static int qmc_probe(struct platform_device *pdev)
static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
{
struct device_node *np = pdev->dev.of_node;
unsigned int nb_chans;
struct resource *res;
struct qmc *qmc;
int irq;
struct qe_firmware_info *qe_fw_info;
const struct qe_firmware *qe_fw;
const struct firmware *fw;
const char *filename;
int ret;
qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
if (!qmc)
return -ENOMEM;
ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
switch (ret) {
case 0:
break;
case -EINVAL:
/* fsl,soft-qmc property not set -> Simply do nothing */
return 0;
default:
dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
np);
return ret;
}
qmc->dev = &pdev->dev;
INIT_LIST_HEAD(&qmc->chan_head);
qe_fw_info = qe_get_firmware_info();
if (qe_fw_info) {
if (!strstr(qe_fw_info->id, "Soft-QMC")) {
dev_err(qmc->dev, "Another Firmware is already loaded\n");
return -EALREADY;
}
dev_info(qmc->dev, "Firmware already loaded\n");
return 0;
}
dev_info(qmc->dev, "Using firmware %s\n", filename);
ret = request_firmware(&fw, filename, qmc->dev);
if (ret) {
dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
return ret;
}
qe_fw = (const struct qe_firmware *)fw->data;
if (fw->size < sizeof(qe_fw->header) ||
be32_to_cpu(qe_fw->header.length) != fw->size) {
dev_err(qmc->dev, "Invalid firmware %s\n", filename);
ret = -EINVAL;
goto end;
}
ret = qe_upload_firmware(qe_fw);
if (ret) {
dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
goto end;
}
ret = 0;
end:
release_firmware(fw);
return ret;
}
static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
struct resource *res;
qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
if (IS_ERR(qmc->scc_regs))
return PTR_ERR(qmc->scc_regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
if (!res)
return -EINVAL;
@ -1591,33 +1730,205 @@ static int qmc_probe(struct platform_device *pdev)
if (IS_ERR(qmc->dpram))
return PTR_ERR(qmc->dpram);
return 0;
}
static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
struct resource *res;
int ucc_num;
s32 info;
qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
if (IS_ERR(qmc->scc_regs))
return PTR_ERR(qmc->scc_regs);
ucc_num = tsa_serial_get_num(qmc->tsa_serial);
if (ucc_num < 0)
return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");
qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
return -EINVAL;
}
/* Allocate the 'Global Multichannel Parameters' and the
* 'Framer parameters' areas. The 'Framer parameters' area
* is located right after the 'Global Multichannel Parameters'.
* The 'Framer parameters' need 1 byte per receive and transmit
* channel. The maximum number of receive or transmit channel
* is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
*/
info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
ALIGNMENT_OF_UCC_SLOW_PRAM);
if (IS_ERR_VALUE(info)) {
dev_err(qmc->dev, "cannot allocate MURAM for PRAM");
return -ENOMEM;
}
if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, info)) {
dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed");
return -EIO;
}
qmc->scc_pram = qe_muram_addr(info);
qmc->scc_pram_offset = info;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
if (!res)
return -EINVAL;
qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
qmc->dpram = devm_ioremap_resource(qmc->dev, res);
if (IS_ERR(qmc->scc_pram))
return PTR_ERR(qmc->scc_pram);
return 0;
}
static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
return qmc_is_qe(qmc) ?
qmc_qe_init_resources(qmc, pdev) :
qmc_cpm1_init_resources(qmc, pdev);
}
static int qmc_cpm1_init_scc(struct qmc *qmc)
{
u32 val;
int ret;
/* Connect the serial (SCC) to TSA */
ret = tsa_serial_connect(qmc->tsa_serial);
if (ret)
return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
/* Init GMSR_H and GMSR_L registers */
val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
/* enable QMC mode */
qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
/* Disable and clear interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
return 0;
}
static int qmc_qe_init_ucc(struct qmc *qmc)
{
u32 val;
int ret;
/* Set the UCC in slow mode */
qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
/* Connect the serial (UCC) to TSA */
ret = tsa_serial_connect(qmc->tsa_serial);
if (ret)
return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
/* Initialize the QMC tx startup addresses */
if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed");
ret = -EIO;
goto err_tsa_serial_disconnect;
}
/* Initialize the QMC rx startup addresses */
if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed");
ret = -EIO;
goto err_tsa_serial_disconnect;
}
/* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
* TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during
* qmc_setup_tsa() call
*/
val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
/* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
SCC_GSMRH_TRX | SCC_GSMRH_TTX;
qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
/* enable QMC mode */
qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);
/* Disable and clear interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
return 0;
err_tsa_serial_disconnect:
tsa_serial_disconnect(qmc->tsa_serial);
return ret;
}
static int qmc_init_xcc(struct qmc *qmc)
{
return qmc_is_qe(qmc) ?
qmc_qe_init_ucc(qmc) :
qmc_cpm1_init_scc(qmc);
}
static void qmc_exit_xcc(struct qmc *qmc)
{
/* Disconnect the serial from TSA */
tsa_serial_disconnect(qmc->tsa_serial);
}
static int qmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
unsigned int nb_chans;
struct qmc *qmc;
int irq;
int ret;
qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
if (!qmc)
return -ENOMEM;
qmc->dev = &pdev->dev;
qmc->data = of_device_get_match_data(&pdev->dev);
if (!qmc->data) {
dev_err(qmc->dev, "Missing match data\n");
return -EINVAL;
}
INIT_LIST_HEAD(&qmc->chan_head);
qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
if (IS_ERR(qmc->tsa_serial)) {
return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
"Failed to get TSA serial\n");
}
/* Connect the serial (SCC) to TSA */
ret = tsa_serial_connect(qmc->tsa_serial);
if (ret) {
dev_err(qmc->dev, "Failed to connect TSA serial\n");
ret = qmc_init_resources(qmc, pdev);
if (ret)
return ret;
if (qmc_is_qe(qmc)) {
ret = qmc_qe_soft_qmc_init(qmc, np);
if (ret)
return ret;
}
/* Parse channels information */
ret = qmc_of_parse_chans(qmc, np);
if (ret)
goto err_tsa_serial_disconnect;
return ret;
nb_chans = qmc_nb_chans(qmc);
/* Init GMSR_H and GMSR_L registers */
qmc_write32(qmc->scc_regs + SCC_GSMRH,
SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);
/* enable QMC mode */
qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);
/*
* Allocate the buffer descriptor table
* 8 rx and 8 tx descriptors per channel
@ -1627,8 +1938,7 @@ static int qmc_probe(struct platform_device *pdev)
&qmc->bd_dma_addr, GFP_KERNEL);
if (!qmc->bd_table) {
dev_err(qmc->dev, "Failed to allocate bd table\n");
ret = -ENOMEM;
goto err_tsa_serial_disconnect;
return -ENOMEM;
}
memset(qmc->bd_table, 0, qmc->bd_size);
@ -1640,8 +1950,7 @@ static int qmc_probe(struct platform_device *pdev)
&qmc->int_dma_addr, GFP_KERNEL);
if (!qmc->int_table) {
dev_err(qmc->dev, "Failed to allocate interrupt table\n");
ret = -ENOMEM;
goto err_tsa_serial_disconnect;
return -ENOMEM;
}
memset(qmc->int_table, 0, qmc->int_size);
@ -1658,30 +1967,49 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
if (qmc_is_qe(qmc)) {
/* Zeroed the reserved area */
memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
QMC_QE_GBL_RSV_B0_SIZE);
qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
/* Init 'framer parameters' area and set the base addresses */
memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
}
ret = qmc_init_tsa(qmc);
if (ret)
goto err_tsa_serial_disconnect;
return ret;
qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
ret = qmc_setup_chans(qmc);
if (ret)
goto err_tsa_serial_disconnect;
return ret;
/* Init interrupts table */
ret = qmc_setup_ints(qmc);
if (ret)
goto err_tsa_serial_disconnect;
return ret;
/* Disable and clear interrupts, set the irq handler */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
/* Init SCC (CPM1) or UCC (QE) */
ret = qmc_init_xcc(qmc);
if (ret)
return ret;
/* Set the irq handler */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_tsa_serial_disconnect;
goto err_exit_xcc;
ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
if (ret < 0)
goto err_tsa_serial_disconnect;
goto err_exit_xcc;
/* Enable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM,
@ -1691,7 +2019,7 @@ static int qmc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_disable_intr;
/* Enable transmiter and receiver */
/* Enable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
platform_set_drvdata(pdev, qmc);
@ -1709,8 +2037,8 @@ err_disable_txrx:
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
err_tsa_serial_disconnect:
tsa_serial_disconnect(qmc->tsa_serial);
err_exit_xcc:
qmc_exit_xcc(qmc);
return ret;
}
@ -1718,18 +2046,43 @@ static void qmc_remove(struct platform_device *pdev)
{
struct qmc *qmc = platform_get_drvdata(pdev);
/* Disable transmiter and receiver */
/* Disable transmitter and receiver */
qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
/* Disable interrupts */
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
/* Disconnect the serial from TSA */
tsa_serial_disconnect(qmc->tsa_serial);
/* Exit SCC (CPM1) or UCC (QE) */
qmc_exit_xcc(qmc);
}
static const struct qmc_data qmc_data_cpm1 = {
.version = QMC_CPM1,
.tstate = 0x30000000,
.rstate = 0x31000000,
.zistate = 0x00000100,
.zdstate_hdlc = 0x00000080,
.zdstate_transp = 0x18000080,
.rpack = 0x00000000,
};
static const struct qmc_data qmc_data_qe = {
.version = QMC_QE,
.tstate = 0x30000000,
.rstate = 0x30000000,
.zistate = 0x00000200,
.zdstate_hdlc = 0x80FFFFE0,
.zdstate_transp = 0x003FFFE2,
.rpack = 0x80000000,
};
static const struct of_device_id qmc_id_table[] = {
{ .compatible = "fsl,cpm1-scc-qmc" },
#if IS_ENABLED(CONFIG_CPM1)
{ .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
#endif
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
{ .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);
@ -1889,5 +2242,5 @@ struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_DESCRIPTION("CPM/QE QMC driver");
MODULE_LICENSE("GPL");


@ -9,6 +9,8 @@
#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
#include <dt-bindings/soc/qe-fsl,tsa.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@ -16,86 +18,116 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/qe/ucc.h>
/* TSA SI RAM routing tables entry (CPM1) */
#define TSA_CPM1_SIRAM_ENTRY_LAST BIT(16)
#define TSA_CPM1_SIRAM_ENTRY_BYTE BIT(17)
#define TSA_CPM1_SIRAM_ENTRY_CNT_MASK GENMASK(21, 18)
#define TSA_CPM1_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_CPM1_SIRAM_ENTRY_CNT_MASK, x)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_MASK GENMASK(24, 22)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x0)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x2)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x3)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x4)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x5)
#define TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2 FIELD_PREP_CONST(TSA_CPM1_SIRAM_ENTRY_CSEL_MASK, 0x6)
/* TSA SI RAM routing tables entry */
#define TSA_SIRAM_ENTRY_LAST (1 << 16)
#define TSA_SIRAM_ENTRY_BYTE (1 << 17)
#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18)
#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22)
#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22)
/* TSA SI RAM routing tables entry (QE) */
#define TSA_QE_SIRAM_ENTRY_LAST BIT(0)
#define TSA_QE_SIRAM_ENTRY_BYTE BIT(1)
#define TSA_QE_SIRAM_ENTRY_CNT_MASK GENMASK(4, 2)
#define TSA_QE_SIRAM_ENTRY_CNT(x) FIELD_PREP(TSA_QE_SIRAM_ENTRY_CNT_MASK, x)
#define TSA_QE_SIRAM_ENTRY_CSEL_MASK GENMASK(8, 5)
#define TSA_QE_SIRAM_ENTRY_CSEL_NU FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x0)
#define TSA_QE_SIRAM_ENTRY_CSEL_UCC5 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x1)
#define TSA_QE_SIRAM_ENTRY_CSEL_UCC1 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0x9)
#define TSA_QE_SIRAM_ENTRY_CSEL_UCC2 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xa)
#define TSA_QE_SIRAM_ENTRY_CSEL_UCC3 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xb)
#define TSA_QE_SIRAM_ENTRY_CSEL_UCC4 FIELD_PREP_CONST(TSA_QE_SIRAM_ENTRY_CSEL_MASK, 0xc)
/* SI mode register (32 bits) */
#define TSA_SIMODE 0x00
#define TSA_SIMODE_SMC2 0x80000000
#define TSA_SIMODE_SMC1 0x00008000
#define TSA_SIMODE_TDMA(x) ((x) << 0)
#define TSA_SIMODE_TDMB(x) ((x) << 16)
#define TSA_SIMODE_TDM_MASK 0x0fff
#define TSA_SIMODE_TDM_SDM_MASK 0x0c00
#define TSA_SIMODE_TDM_SDM_NORM 0x0000
#define TSA_SIMODE_TDM_SDM_ECHO 0x0400
#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800
#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00
#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8)
#define TSA_SIMODE_TDM_DSC 0x0080
#define TSA_SIMODE_TDM_CRT 0x0040
#define TSA_SIMODE_TDM_STZ 0x0020
#define TSA_SIMODE_TDM_CE 0x0010
#define TSA_SIMODE_TDM_FE 0x0008
#define TSA_SIMODE_TDM_GM 0x0004
#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0)
/*
* SI mode register :
* - CPM1: 32bit register split in 2*16bit (16bit TDM)
* - QE: 4x16bit registers, one per TDM
*/
#define TSA_CPM1_SIMODE 0x00
#define TSA_QE_SIAMR 0x00
#define TSA_QE_SIBMR 0x02
#define TSA_QE_SICMR 0x04
#define TSA_QE_SIDMR 0x06
#define TSA_CPM1_SIMODE_SMC2 BIT(31)
#define TSA_CPM1_SIMODE_SMC1 BIT(15)
#define TSA_CPM1_SIMODE_TDMA_MASK GENMASK(11, 0)
#define TSA_CPM1_SIMODE_TDMA(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMA_MASK, x)
#define TSA_CPM1_SIMODE_TDMB_MASK GENMASK(27, 16)
#define TSA_CPM1_SIMODE_TDMB(x) FIELD_PREP(TSA_CPM1_SIMODE_TDMB_MASK, x)
#define TSA_QE_SIMODE_TDM_SAD_MASK GENMASK(15, 12)
#define TSA_QE_SIMODE_TDM_SAD(x) FIELD_PREP(TSA_QE_SIMODE_TDM_SAD_MASK, x)
#define TSA_CPM1_SIMODE_TDM_MASK GENMASK(11, 0)
#define TSA_SIMODE_TDM_SDM_MASK GENMASK(11, 10)
#define TSA_SIMODE_TDM_SDM_NORM FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x0)
#define TSA_SIMODE_TDM_SDM_ECHO FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x1)
#define TSA_SIMODE_TDM_SDM_INTL_LOOP FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x2)
#define TSA_SIMODE_TDM_SDM_LOOP_CTRL FIELD_PREP_CONST(TSA_SIMODE_TDM_SDM_MASK, 0x3)
#define TSA_SIMODE_TDM_RFSD_MASK GENMASK(9, 8)
#define TSA_SIMODE_TDM_RFSD(x) FIELD_PREP(TSA_SIMODE_TDM_RFSD_MASK, x)
#define TSA_SIMODE_TDM_DSC BIT(7)
#define TSA_SIMODE_TDM_CRT BIT(6)
#define TSA_CPM1_SIMODE_TDM_STZ BIT(5) /* bit 5: STZ in CPM1 */
#define TSA_QE_SIMODE_TDM_SL BIT(5) /* bit 5: SL in QE */
#define TSA_SIMODE_TDM_CE BIT(4)
#define TSA_SIMODE_TDM_FE BIT(3)
#define TSA_SIMODE_TDM_GM BIT(2)
#define TSA_SIMODE_TDM_TFSD_MASK GENMASK(1, 0)
#define TSA_SIMODE_TDM_TFSD(x) FIELD_PREP(TSA_SIMODE_TDM_TFSD_MASK, x)
/* SI global mode register (8 bits) */
#define TSA_SIGMR 0x04
#define TSA_SIGMR_ENB (1<<3)
#define TSA_SIGMR_ENA (1<<2)
#define TSA_SIGMR_RDM_MASK 0x03
#define TSA_SIGMR_RDM_STATIC_TDMA 0x00
#define TSA_SIGMR_RDM_DYN_TDMA 0x01
#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02
#define TSA_SIGMR_RDM_DYN_TDMAB 0x03
/* CPM SI global mode register (8 bits) */
#define TSA_CPM1_SIGMR 0x04
#define TSA_CPM1_SIGMR_ENB BIT(3)
#define TSA_CPM1_SIGMR_ENA BIT(2)
#define TSA_CPM1_SIGMR_RDM_MASK GENMASK(1, 0)
#define TSA_CPM1_SIGMR_RDM_STATIC_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x0)
#define TSA_CPM1_SIGMR_RDM_DYN_TDMA FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x1)
#define TSA_CPM1_SIGMR_RDM_STATIC_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x2)
#define TSA_CPM1_SIGMR_RDM_DYN_TDMAB FIELD_PREP_CONST(TSA_CPM1_SIGMR_RDM_MASK, 0x3)
/* SI status register (8 bits) */
#define TSA_SISTR 0x06
/* SI command register (8 bits) */
#define TSA_SICMR 0x07
/* QE SI global mode register high (8 bits) */
#define TSA_QE_SIGLMRH 0x08
#define TSA_QE_SIGLMRH_END BIT(3)
#define TSA_QE_SIGLMRH_ENC BIT(2)
#define TSA_QE_SIGLMRH_ENB BIT(1)
#define TSA_QE_SIGLMRH_ENA BIT(0)
/* SI clock route register (32 bits) */
#define TSA_SICR 0x0C
#define TSA_SICR_SCC2(x) ((x) << 8)
#define TSA_SICR_SCC3(x) ((x) << 16)
#define TSA_SICR_SCC4(x) ((x) << 24)
#define TSA_SICR_SCC_MASK 0x0ff
#define TSA_SICR_SCC_GRX (1 << 7)
#define TSA_SICR_SCC_SCX_TSA (1 << 6)
#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3)
#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3)
#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3)
#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3)
#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3)
#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3)
#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3)
#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3)
#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3)
#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0)
#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0)
#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0)
#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0)
#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0)
#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0)
#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0)
#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0)
#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0)
/* Serial interface RAM pointer register (32 bits) */
#define TSA_SIRP 0x10
#define TSA_CPM1_SICR 0x0C
#define TSA_CPM1_SICR_SCC2_MASK GENMASK(15, 8)
#define TSA_CPM1_SICR_SCC2(x) FIELD_PREP(TSA_CPM1_SICR_SCC2_MASK, x)
#define TSA_CPM1_SICR_SCC3_MASK GENMASK(23, 16)
#define TSA_CPM1_SICR_SCC3(x) FIELD_PREP(TSA_CPM1_SICR_SCC3_MASK, x)
#define TSA_CPM1_SICR_SCC4_MASK GENMASK(31, 24)
#define TSA_CPM1_SICR_SCC4(x) FIELD_PREP(TSA_CPM1_SICR_SCC4_MASK, x)
#define TSA_CPM1_SICR_SCC_MASK GENMASK(7, 0)
#define TSA_CPM1_SICR_SCC_GRX BIT(7)
#define TSA_CPM1_SICR_SCC_SCX_TSA BIT(6)
#define TSA_CPM1_SICR_SCC_RXCS_MASK GENMASK(5, 3)
#define TSA_CPM1_SICR_SCC_RXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x0)
#define TSA_CPM1_SICR_SCC_RXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x1)
#define TSA_CPM1_SICR_SCC_RXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x2)
#define TSA_CPM1_SICR_SCC_RXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x3)
#define TSA_CPM1_SICR_SCC_RXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x4)
#define TSA_CPM1_SICR_SCC_RXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x5)
#define TSA_CPM1_SICR_SCC_RXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x6)
#define TSA_CPM1_SICR_SCC_RXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_RXCS_MASK, 0x7)
#define TSA_CPM1_SICR_SCC_TXCS_MASK GENMASK(2, 0)
#define TSA_CPM1_SICR_SCC_TXCS_BRG1 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x0)
#define TSA_CPM1_SICR_SCC_TXCS_BRG2 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x1)
#define TSA_CPM1_SICR_SCC_TXCS_BRG3 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x2)
#define TSA_CPM1_SICR_SCC_TXCS_BRG4 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x3)
#define TSA_CPM1_SICR_SCC_TXCS_CLK15 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x4)
#define TSA_CPM1_SICR_SCC_TXCS_CLK26 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x5)
#define TSA_CPM1_SICR_SCC_TXCS_CLK37 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x6)
#define TSA_CPM1_SICR_SCC_TXCS_CLK48 FIELD_PREP_CONST(TSA_CPM1_SICR_SCC_TXCS_MASK, 0x7)
struct tsa_entries_area {
void __iomem *entries_start;
@ -114,15 +146,31 @@ struct tsa_tdm {
#define TSA_TDMA 0
#define TSA_TDMB 1
#define TSA_TDMC 2 /* QE implementation only */
#define TSA_TDMD 3 /* QE implementation only */
enum tsa_version {
TSA_CPM1 = 1, /* Avoid 0 value */
TSA_QE,
};
struct tsa {
struct device *dev;
void __iomem *si_regs;
void __iomem *si_ram;
resource_size_t si_ram_sz;
spinlock_t lock;
spinlock_t lock; /* Lock for read/modify/write sequence */
enum tsa_version version;
int tdms; /* TSA_TDMx ORed */
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
struct tsa_tdm tdm[4]; /* TDMa, TDMb, TDMc and TDMd */
#else
struct tsa_tdm tdm[2]; /* TDMa and TDMb */
#endif
/* Same number of serials for CPM1 and QE:
* CPM1: NU, 3 SCCs and 2 SMCs
* QE: NU and 5 UCCs
*/
struct tsa_serial {
unsigned int id;
struct tsa_serial_info info;
@ -140,7 +188,12 @@ static inline void tsa_write32(void __iomem *addr, u32 val)
iowrite32be(val, addr);
}
static inline void tsa_write8(void __iomem *addr, u32 val)
static inline void tsa_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
}
static inline void tsa_write8(void __iomem *addr, u8 val)
{
iowrite8(val, addr);
}
@ -150,17 +203,68 @@ static inline u32 tsa_read32(void __iomem *addr)
return ioread32be(addr);
}
static inline u16 tsa_read16(void __iomem *addr)
{
return ioread16be(addr);
}
static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
tsa_write32(addr, tsa_read32(addr) & ~clr);
}
static inline void tsa_clrbits16(void __iomem *addr, u16 clr)
{
tsa_write16(addr, tsa_read16(addr) & ~clr);
}
static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}
int tsa_serial_connect(struct tsa_serial *tsa_serial)
static bool tsa_is_qe(const struct tsa *tsa)
{
if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
return tsa->version == TSA_QE;
return IS_ENABLED(CONFIG_QUICC_ENGINE);
}
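When only one of CONFIG_CPM and CONFIG_QUICC_ENGINE is built in, both IS_ENABLED() operands are compile-time constants, so tsa_is_qe() collapses to a constant and the compiler can drop the unused CPM1 or QE paths; tsa->version is only consulted when both flavours are present in the same kernel. A standalone sketch of that folding pattern, using hypothetical stand-in macros instead of the real kconfig symbols:

#include <stdio.h>

/* Stand-ins for IS_ENABLED(CONFIG_CPM) / IS_ENABLED(CONFIG_QUICC_ENGINE) */
#define HAVE_CPM	1
#define HAVE_QE		0

static int is_qe(int runtime_version_is_qe)
{
	if (HAVE_CPM && HAVE_QE)
		return runtime_version_is_qe;	/* both built in: runtime check */
	return HAVE_QE;				/* single flavour: constant, other branch is dead code */
}

int main(void)
{
	printf("is_qe: %d\n", is_qe(1));	/* prints 0 with the values above */
	return 0;
}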
static int tsa_qe_serial_get_num(struct tsa_serial *tsa_serial)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
switch (tsa_serial->id) {
case FSL_QE_TSA_UCC1: return 0;
case FSL_QE_TSA_UCC2: return 1;
case FSL_QE_TSA_UCC3: return 2;
case FSL_QE_TSA_UCC4: return 3;
case FSL_QE_TSA_UCC5: return 4;
default:
break;
}
dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
return -EINVAL;
}
int tsa_serial_get_num(struct tsa_serial *tsa_serial)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
/*
* There is no need to get the serial num out of the TSA driver in the
* CPM case.
* Furthermore, in CPM, we can have two types of serials: SCCs and FCCs.
* What kind of numbering could be global to both SCCs and FCCs?
*/
return tsa_is_qe(tsa) ? tsa_qe_serial_get_num(tsa_serial) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(tsa_serial_get_num);
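The new export lets a QE consumer translate its TSA serial handle into the zero-based UCC index it needs elsewhere in the QE code. A minimal usage sketch, assuming the caller already holds a struct tsa_serial handle; the helper name and error handling are illustrative only:

/* Illustrative consumer: map a TSA serial to its zero-based UCC index. */
static int example_ucc_index(struct device *dev, struct tsa_serial *tsa_serial)
{
	int ucc_num;

	ucc_num = tsa_serial_get_num(tsa_serial);
	if (ucc_num < 0) {
		/* -EOPNOTSUPP on CPM1, -EINVAL for an unexpected serial id */
		dev_err(dev, "getting UCC number failed (%d)\n", ucc_num);
		return ucc_num;
	}

	dev_dbg(dev, "serial is UCC%d\n", ucc_num + 1);
	return ucc_num;
}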
static int tsa_cpm1_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
@ -169,16 +273,16 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
switch (tsa_serial->id) {
case FSL_CPM_TSA_SCC2:
clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
clear = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_MASK);
set = TSA_CPM1_SICR_SCC2(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC3:
clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
clear = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_MASK);
set = TSA_CPM1_SICR_SCC3(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
case FSL_CPM_TSA_SCC4:
clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
clear = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_MASK);
set = TSA_CPM1_SICR_SCC4(TSA_CPM1_SICR_SCC_SCX_TSA);
break;
default:
dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
@ -186,39 +290,52 @@ int tsa_serial_connect(struct tsa_serial *tsa_serial)
}
spin_lock_irqsave(&tsa->lock, flags);
tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SICR, clear,
connect ? set : 0);
spin_unlock_irqrestore(&tsa->lock, flags);
return 0;
}
static int tsa_qe_serial_connect(struct tsa_serial *tsa_serial, bool connect)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
int ucc_num;
int ret;
ucc_num = tsa_qe_serial_get_num(tsa_serial);
if (ucc_num < 0)
return ucc_num;
spin_lock_irqsave(&tsa->lock, flags);
ret = ucc_set_qe_mux_tsa(ucc_num, connect);
spin_unlock_irqrestore(&tsa->lock, flags);
if (ret) {
dev_err(tsa->dev, "Connect serial id %u to TSA failed (%d)\n",
tsa_serial->id, ret);
return ret;
}
return 0;
}
int tsa_serial_connect(struct tsa_serial *tsa_serial)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
return tsa_is_qe(tsa) ?
tsa_qe_serial_connect(tsa_serial, true) :
tsa_cpm1_serial_connect(tsa_serial, true);
}
EXPORT_SYMBOL(tsa_serial_connect);
int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
{
struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
unsigned long flags;
u32 clear;
switch (tsa_serial->id) {
case FSL_CPM_TSA_SCC2:
clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
break;
case FSL_CPM_TSA_SCC3:
clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
break;
case FSL_CPM_TSA_SCC4:
clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
break;
default:
dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
return -EINVAL;
}
spin_lock_irqsave(&tsa->lock, flags);
tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
spin_unlock_irqrestore(&tsa->lock, flags);
return 0;
return tsa_is_qe(tsa) ?
tsa_qe_serial_connect(tsa_serial, false) :
tsa_cpm1_serial_connect(tsa_serial, false);
}
EXPORT_SYMBOL(tsa_serial_disconnect);
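From a consumer's point of view nothing changes: the same connect/disconnect pair is used on CPM1 and QE, only the underlying CPM1 SICR update or QE mux call differs. A minimal sketch of the expected sequence, with hypothetical helper names:

/* Illustrative only: route the serial through the TSA before use. */
static int example_start(struct device *dev, struct tsa_serial *tsa_serial)
{
	int ret;

	ret = tsa_serial_connect(tsa_serial);
	if (ret) {
		dev_err(dev, "tsa_serial_connect() failed (%d)\n", ret);
		return ret;
	}

	/* ... configure and start the SCC/UCC here ... */
	return 0;
}

static void example_stop(struct tsa_serial *tsa_serial)
{
	tsa_serial_disconnect(tsa_serial);
}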
@ -229,14 +346,14 @@ int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *i
}
EXPORT_SYMBOL(tsa_serial_get_info);
static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
static void tsa_cpm1_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
u32 tdms, u32 tdm_id, bool is_rx)
{
resource_size_t quarter;
resource_size_t half;
quarter = tsa->si_ram_sz/4;
half = tsa->si_ram_sz/2;
quarter = tsa->si_ram_sz / 4;
half = tsa->si_ram_sz / 2;
if (tdms == BIT(TSA_TDMA)) {
/* Only TDMA */
@ -281,7 +398,42 @@ static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area
}
}
static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
static void tsa_qe_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
u32 tdms, u32 tdm_id, bool is_rx)
{
resource_size_t eighth;
resource_size_t half;
eighth = tsa->si_ram_sz / 8;
half = tsa->si_ram_sz / 2;
/*
* One half of the SI RAM is used for Tx, the other one for Rx.
* In each half, 1/4 of the area is assigned to each TDM.
*/
if (is_rx) {
/* Rx: Second half of si_ram */
area->entries_start = tsa->si_ram + half + (eighth * tdm_id);
area->entries_next = area->entries_start + eighth;
area->last_entry = NULL;
} else {
/* Tx: First half of si_ram */
area->entries_start = tsa->si_ram + (eighth * tdm_id);
area->entries_next = area->entries_start + eighth;
area->last_entry = NULL;
}
}
static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
u32 tdms, u32 tdm_id, bool is_rx)
{
if (tsa_is_qe(tsa))
tsa_qe_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
else
tsa_cpm1_init_entries_area(tsa, area, tdms, tdm_id, is_rx);
}
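Unlike the CPM1 split, which depends on which TDMs are enabled, the QE layout is fixed: the first half of the SI RAM holds the Tx entries, the second half the Rx entries, and each half is cut into four equal per-TDM windows (one eighth of the total each). A small standalone sketch of that arithmetic, assuming a 2048-byte SI RAM purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int si_ram_sz = 2048;		/* assumed size, for illustration only */
	unsigned int eighth = si_ram_sz / 8;	/* one per-TDM window */
	unsigned int half = si_ram_sz / 2;	/* Tx/Rx boundary */
	unsigned int tdm_id;

	for (tdm_id = 0; tdm_id < 4; tdm_id++)
		printf("TDM%c: Tx bytes [%u..%u), Rx bytes [%u..%u)\n",
		       'A' + tdm_id,
		       eighth * tdm_id, eighth * (tdm_id + 1),
		       half + eighth * tdm_id, half + eighth * (tdm_id + 1));
	return 0;
}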
static const char *tsa_cpm1_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_CPM_TSA_NU: return "Not used";
@ -296,21 +448,43 @@ static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
return NULL;
}
static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
static const char *tsa_qe_serial_id2name(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2;
case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3;
case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4;
case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1;
case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2;
case FSL_QE_TSA_NU: return "Not used";
case FSL_QE_TSA_UCC1: return "UCC1";
case FSL_QE_TSA_UCC2: return "UCC2";
case FSL_QE_TSA_UCC3: return "UCC3";
case FSL_QE_TSA_UCC4: return "UCC4";
case FSL_QE_TSA_UCC5: return "UCC5";
default:
break;
}
return TSA_SIRAM_ENTRY_CSEL_NU;
return NULL;
}
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
return tsa_is_qe(tsa) ?
tsa_qe_serial_id2name(tsa, serial_id) :
tsa_cpm1_serial_id2name(tsa, serial_id);
}
static u32 tsa_cpm1_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_CPM_TSA_SCC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC2;
case FSL_CPM_TSA_SCC3: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC3;
case FSL_CPM_TSA_SCC4: return TSA_CPM1_SIRAM_ENTRY_CSEL_SCC4;
case FSL_CPM_TSA_SMC1: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC1;
case FSL_CPM_TSA_SMC2: return TSA_CPM1_SIRAM_ENTRY_CSEL_SMC2;
default:
break;
}
return TSA_CPM1_SIRAM_ENTRY_CSEL_NU;
}
static int tsa_cpm1_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
u32 count, u32 serial_id)
{
void __iomem *addr;
@ -329,21 +503,21 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
if (area->last_entry) {
/* Clear last flag */
tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
tsa_clrbits32(area->last_entry, TSA_CPM1_SIRAM_ENTRY_LAST);
}
left = count;
while (left) {
val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
val = TSA_CPM1_SIRAM_ENTRY_BYTE | tsa_cpm1_serial_id2csel(tsa, serial_id);
if (left > 16) {
cnt = 16;
} else {
cnt = left;
val |= TSA_SIRAM_ENTRY_LAST;
val |= TSA_CPM1_SIRAM_ENTRY_LAST;
area->last_entry = addr;
}
val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
val |= TSA_CPM1_SIRAM_ENTRY_CNT(cnt - 1);
tsa_write32(addr, val);
addr += 4;
@ -353,6 +527,71 @@ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
return 0;
}
static u32 tsa_qe_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
switch (serial_id) {
case FSL_QE_TSA_UCC1: return TSA_QE_SIRAM_ENTRY_CSEL_UCC1;
case FSL_QE_TSA_UCC2: return TSA_QE_SIRAM_ENTRY_CSEL_UCC2;
case FSL_QE_TSA_UCC3: return TSA_QE_SIRAM_ENTRY_CSEL_UCC3;
case FSL_QE_TSA_UCC4: return TSA_QE_SIRAM_ENTRY_CSEL_UCC4;
case FSL_QE_TSA_UCC5: return TSA_QE_SIRAM_ENTRY_CSEL_UCC5;
default:
break;
}
return TSA_QE_SIRAM_ENTRY_CSEL_NU;
}
static int tsa_qe_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
u32 count, u32 serial_id)
{
void __iomem *addr;
u32 left;
u32 val;
u32 cnt;
u32 nb;
addr = area->last_entry ? area->last_entry + 2 : area->entries_start;
nb = DIV_ROUND_UP(count, 8);
if ((addr + (nb * 2)) > area->entries_next) {
dev_err(tsa->dev, "si ram area full\n");
return -ENOSPC;
}
if (area->last_entry) {
/* Clear last flag */
tsa_clrbits16(area->last_entry, TSA_QE_SIRAM_ENTRY_LAST);
}
left = count;
while (left) {
val = TSA_QE_SIRAM_ENTRY_BYTE | tsa_qe_serial_id2csel(tsa, serial_id);
if (left > 8) {
cnt = 8;
} else {
cnt = left;
val |= TSA_QE_SIRAM_ENTRY_LAST;
area->last_entry = addr;
}
val |= TSA_QE_SIRAM_ENTRY_CNT(cnt - 1);
tsa_write16(addr, val);
addr += 2;
left -= cnt;
}
return 0;
}
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
u32 count, u32 serial_id)
{
return tsa_is_qe(tsa) ?
tsa_qe_add_entry(tsa, area, count, serial_id) :
tsa_cpm1_add_entry(tsa, area, count, serial_id);
}
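The two entry formats pack differently: a CPM1 SI RAM entry is 32 bits wide and can cover up to 16 consecutive time slots, while a QE entry is 16 bits wide and covers at most 8, which is why tsa_qe_add_entry() sizes the area with DIV_ROUND_UP(count, 8). A standalone sketch of the resulting entry counts for an example route of 20 time slots:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int count = 20;	/* time slots routed to one serial (example) */

	/* CPM1: 32-bit entries, up to 16 time slots each */
	printf("CPM1: %u entries x 4 bytes\n", DIV_ROUND_UP(count, 16));
	/* QE: 16-bit entries, up to 8 time slots each */
	printf("QE:   %u entries x 2 bytes\n", DIV_ROUND_UP(count, 8));
	return 0;
}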
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
u32 tdms, u32 tdm_id, bool is_rx)
{
@ -399,7 +638,7 @@ static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
}
dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
tdm_id, route_name, ts, ts+count-1, serial_name);
tdm_id, route_name, ts, ts + count - 1, serial_name);
ts += count;
ret = tsa_add_entry(tsa, &area, count, serial_id);
@ -449,8 +688,8 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
int i;
tsa->tdms = 0;
tsa->tdm[0].is_enable = false;
tsa->tdm[1].is_enable = false;
for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++)
tsa->tdm[i].is_enable = false;
for_each_available_child_of_node(np, tdm_np) {
ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
@ -466,7 +705,18 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
case 1:
tsa->tdms |= BIT(TSA_TDMB);
break;
case 2:
if (!tsa_is_qe(tsa))
goto invalid_tdm; /* Not available on CPM1 */
tsa->tdms |= BIT(TSA_TDMC);
break;
case 3:
if (!tsa_is_qe(tsa))
goto invalid_tdm; /* Not available on CPM1 */
tsa->tdms |= BIT(TSA_TDMD);
break;
default:
invalid_tdm:
dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
tdm_id);
of_node_put(tdm_np);
@ -532,10 +782,14 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
if (tsa_is_qe(tsa) &&
of_property_read_bool(tdm_np, "fsl,fsync-active-low"))
tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SL;
if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
clk = of_clk_get_by_name(tdm_np, "l1rsync");
clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rsync" : "l1rsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@ -549,7 +803,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
}
tdm->l1rsync_clk = clk;
clk = of_clk_get_by_name(tdm_np, "l1rclk");
clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "rclk" : "l1rclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@ -564,7 +818,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
tdm->l1rclk_clk = clk;
if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
clk = of_clk_get_by_name(tdm_np, "l1tsync");
clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tsync" : "l1tsync");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@ -578,7 +832,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
}
tdm->l1tsync_clk = clk;
clk = of_clk_get_by_name(tdm_np, "l1tclk");
clk = of_clk_get_by_name(tdm_np, tsa_is_qe(tsa) ? "tclk" : "l1tclk");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
of_node_put(tdm_np);
@ -593,6 +847,17 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
tdm->l1tclk_clk = clk;
}
if (tsa_is_qe(tsa)) {
/*
* The starting address for the TSA table must be set.
* 512 entries for Tx and 512 entries for Rx are
* available for 4 TDMs.
* We assign entries equally -> 128 Rx/Tx entries per
* TDM. In other words, 4 blocks of 32 entries per TDM.
*/
tdm->simode_tdm |= TSA_QE_SIMODE_TDM_SAD(4 * tdm_id);
}
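The comment's arithmetic works out to a SAD unit of 32 entries: with 4 blocks (128 entries) per TDM, the value 4 * tdm_id points each TDM at the start of its own window. A standalone sketch of those numbers:

#include <stdio.h>

int main(void)
{
	/* From the comment above: 128 entries per TDM, i.e. 4 blocks of 32. */
	unsigned int entries_per_block = 32;
	unsigned int tdm_id;

	for (tdm_id = 0; tdm_id < 4; tdm_id++)
		printf("TDM%c: SAD=%u -> first entry %u\n", 'A' + tdm_id,
		       4 * tdm_id, 4 * tdm_id * entries_per_block);
	return 0;
}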
ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
if (ret) {
of_node_put(tdm_np);
@ -610,7 +875,7 @@ static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
return 0;
err:
for (i = 0; i < 2; i++) {
for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@ -636,8 +901,87 @@ static void tsa_init_si_ram(struct tsa *tsa)
resource_size_t i;
/* Fill all entries as the last one */
if (tsa_is_qe(tsa)) {
for (i = 0; i < tsa->si_ram_sz; i += 2)
tsa_write16(tsa->si_ram + i, TSA_QE_SIRAM_ENTRY_LAST);
} else {
for (i = 0; i < tsa->si_ram_sz; i += 4)
tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
tsa_write32(tsa->si_ram + i, TSA_CPM1_SIRAM_ENTRY_LAST);
}
}
static int tsa_cpm1_setup(struct tsa *tsa)
{
u32 val;
/* Set SIMODE */
val = 0;
if (tsa->tdm[0].is_enable)
val |= TSA_CPM1_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
if (tsa->tdm[1].is_enable)
val |= TSA_CPM1_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
tsa_clrsetbits32(tsa->si_regs + TSA_CPM1_SIMODE,
TSA_CPM1_SIMODE_TDMA(TSA_CPM1_SIMODE_TDM_MASK) |
TSA_CPM1_SIMODE_TDMB(TSA_CPM1_SIMODE_TDM_MASK),
val);
/* Set SIGMR */
val = (tsa->tdms == BIT(TSA_TDMA)) ?
TSA_CPM1_SIGMR_RDM_STATIC_TDMA : TSA_CPM1_SIGMR_RDM_STATIC_TDMAB;
if (tsa->tdms & BIT(TSA_TDMA))
val |= TSA_CPM1_SIGMR_ENA;
if (tsa->tdms & BIT(TSA_TDMB))
val |= TSA_CPM1_SIGMR_ENB;
tsa_write8(tsa->si_regs + TSA_CPM1_SIGMR, val);
return 0;
}
static int tsa_qe_setup(struct tsa *tsa)
{
unsigned int sixmr;
u8 siglmrh = 0;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (!tsa->tdm[i].is_enable)
continue;
switch (i) {
case 0:
sixmr = TSA_QE_SIAMR;
siglmrh |= TSA_QE_SIGLMRH_ENA;
break;
case 1:
sixmr = TSA_QE_SIBMR;
siglmrh |= TSA_QE_SIGLMRH_ENB;
break;
case 2:
sixmr = TSA_QE_SICMR;
siglmrh |= TSA_QE_SIGLMRH_ENC;
break;
case 3:
sixmr = TSA_QE_SIDMR;
siglmrh |= TSA_QE_SIGLMRH_END;
break;
default:
return -EINVAL;
}
/* Set SI mode register */
tsa_write16(tsa->si_regs + sixmr, tsa->tdm[i].simode_tdm);
}
/* Enable TDMs */
tsa_write8(tsa->si_regs + TSA_QE_SIGLMRH, siglmrh);
return 0;
}
static int tsa_setup(struct tsa *tsa)
{
return tsa_is_qe(tsa) ? tsa_qe_setup(tsa) : tsa_cpm1_setup(tsa);
}
static int tsa_probe(struct platform_device *pdev)
@ -646,7 +990,6 @@ static int tsa_probe(struct platform_device *pdev)
struct resource *res;
struct tsa *tsa;
unsigned int i;
u32 val;
int ret;
tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
@ -654,6 +997,18 @@ static int tsa_probe(struct platform_device *pdev)
return -ENOMEM;
tsa->dev = &pdev->dev;
tsa->version = (enum tsa_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
switch (tsa->version) {
case TSA_CPM1:
dev_info(tsa->dev, "CPM1 version\n");
break;
case TSA_QE:
dev_info(tsa->dev, "QE version\n");
break;
default:
dev_err(tsa->dev, "Unknown version (%d)\n", tsa->version);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
tsa->serials[i].id = i;
@ -680,26 +1035,9 @@ static int tsa_probe(struct platform_device *pdev)
if (ret)
return ret;
/* Set SIMODE */
val = 0;
if (tsa->tdm[0].is_enable)
val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
if (tsa->tdm[1].is_enable)
val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
val);
/* Set SIGMR */
val = (tsa->tdms == BIT(TSA_TDMA)) ?
TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
if (tsa->tdms & BIT(TSA_TDMA))
val |= TSA_SIGMR_ENA;
if (tsa->tdms & BIT(TSA_TDMB))
val |= TSA_SIGMR_ENB;
tsa_write8(tsa->si_regs + TSA_SIGMR, val);
ret = tsa_setup(tsa);
if (ret)
return ret;
platform_set_drvdata(pdev, tsa);
@ -711,7 +1049,7 @@ static void tsa_remove(struct platform_device *pdev)
struct tsa *tsa = platform_get_drvdata(pdev);
int i;
for (i = 0; i < 2; i++) {
for (i = 0; i < ARRAY_SIZE(tsa->tdm); i++) {
if (tsa->tdm[i].l1rsync_clk) {
clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
clk_put(tsa->tdm[i].l1rsync_clk);
@ -732,7 +1070,12 @@ static void tsa_remove(struct platform_device *pdev)
}
static const struct of_device_id tsa_id_table[] = {
{ .compatible = "fsl,cpm1-tsa" },
#if IS_ENABLED(CONFIG_CPM1)
{ .compatible = "fsl,cpm1-tsa", .data = (void *)TSA_CPM1 },
#endif
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
{ .compatible = "fsl,qe-tsa", .data = (void *)TSA_QE },
#endif
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);
@ -841,5 +1184,5 @@ struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM TSA driver");
MODULE_DESCRIPTION("CPM/QE TSA driver");
MODULE_LICENSE("GPL");

View File

@ -39,4 +39,7 @@ struct tsa_serial_info {
/* Get information */
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info);
/* Get serial number */
int tsa_serial_get_num(struct tsa_serial *tsa_serial);
#endif /* __SOC_FSL_TSA_H__ */

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
#ifndef __DT_BINDINGS_SOC_FSL_QE_TSA_H
#define __DT_BINDINGS_SOC_FSL_QE_TSA_H
#define FSL_QE_TSA_NU 0
#define FSL_QE_TSA_UCC1 1
#define FSL_QE_TSA_UCC2 2
#define FSL_QE_TSA_UCC3 3
#define FSL_QE_TSA_UCC4 4
#define FSL_QE_TSA_UCC5 5
#endif

View File

@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/types.h>
struct device;
#define QE_NUM_OF_SNUM 256 /* There are 256 serial numbers in QE */
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
@ -93,8 +95,12 @@ int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
s32 cpm_muram_alloc(unsigned long size, unsigned long align);
s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
@ -106,6 +112,12 @@ static inline s32 cpm_muram_alloc(unsigned long size,
return -ENOSYS;
}
static inline s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
unsigned long align)
{
return -ENOSYS;
}
static inline void cpm_muram_free(s32 offset)
{
}
@ -116,6 +128,13 @@ static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
return -ENOSYS;
}
static inline s32 devm_cpm_muram_alloc_fixed(struct device *dev,
unsigned long offset,
unsigned long size)
{
return -ENOSYS;
}
static inline void __iomem *cpm_muram_addr(unsigned long offset)
{
return NULL;
@ -172,7 +191,6 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
extern struct qe_pin *qe_pin_request(struct device *dev, int index);
@ -233,7 +251,9 @@ static inline int qe_alive_during_sleep(void)
/* we actually use cpm_muram implementation, define this for convenience */
#define qe_muram_init cpm_muram_init
#define qe_muram_alloc cpm_muram_alloc
#define devm_qe_muram_alloc devm_cpm_muram_alloc
#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
#define devm_qe_muram_alloc_fixed devm_cpm_muram_alloc_fixed
#define qe_muram_free cpm_muram_free
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
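The devm_ variants mirror the existing allocators but tie the MURAM area to the device lifetime, so a caller does not need explicit cpm_muram_free() calls on its error and remove paths. A minimal usage sketch, with an assumed size and alignment and a hypothetical helper name:

/* Illustrative only: a 64-byte, 32-byte-aligned MURAM area bound to 'dev'. */
static void __iomem *example_muram_area(struct device *dev)
{
	s32 offset;

	offset = devm_cpm_muram_alloc(dev, 64, 32);
	if (offset < 0) {
		dev_err(dev, "muram allocation failed (%d)\n", offset);
		return NULL;
	}

	/* Kernel mapping of the area; released automatically on unbind. */
	return cpm_muram_addr(offset);
}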
@ -449,6 +469,7 @@ enum comm_dir {
#define QE_QMC_STOP_TX 0x0000000c
#define QE_QMC_STOP_RX 0x0000000d
#define QE_SS7_SU_FIL_RESET 0x0000000e
#define QE_PUSHSCHED 0x0000000f
/* jonathbr added from here down for 83xx */
#define QE_RESET_BCS 0x0000000a
#define QE_MCC_INIT_TX_RX_16 0x00000003