
Merge branch 'lan966x-driver'

Horatiu Vultur says:

====================
net: lan966x: Add lan966x switch driver

This patch series adds support for the Microchip lan966x driver.

The lan966x switch is a multi-port Gigabit AVB/TSN Ethernet Switch with
two integrated 10/100/1000Base-T PHYs. In addition to the integrated PHYs,
it supports up to 2 RGMII/RMII, up to 3 BASE-X/SERDES/2.5GBASE-X and up to
2 Quad-SGMII/Quad-USGMII interfaces.

Initially it adds support only for the ports to behave as simple NICs.
Future patches will extend it with other functionality such as Switchdev,
PTP, Frame DMA, VCAP, etc.

v4->v5:
- more fixes to the switch reset: require all resources to be available before
  activating the hardware
- fix to lan966x-switch binding
- implement get/set_pauseparam in ethtool_ops
- stop calling lan966x_port_link_down from lan966x_port_pcs_set and instead
  call it in lan966x_phylink_mac_link_down

v3->v4:
- add timeouts when injecting/extracting frames, in case the HW breaks
- simplify the creation of the IFH
- fix the order of operations in lan966x_cleanup_ports
- fixes to phylink based on Russell's review

v2->v3:
- fix compiling issues for x86
- fix resource management in first patch

v1->v2:
- add new patch for MAINTAINERS
- add functions lan966x_mac_cpu_learn/forget
- fix build issues with second patch
- fix the reset of the switch, return error if there is no reset controller
- start to use phylink_mii_c22_pcs_decode_state and
  phylink_mii_c22_pcs_encode_advertisement to remove duplicate code
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2021-11-29 12:58:39 +00:00
commit 77a3124683
14 changed files with 3546 additions and 0 deletions

Documentation/devicetree/bindings/net/microchip,lan966x-switch.yaml

@@ -0,0 +1,158 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/microchip,lan966x-switch.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Microchip Lan966x Ethernet switch controller

maintainers:
  - Horatiu Vultur <horatiu.vultur@microchip.com>

description: |
  The lan966x switch is a multi-port Gigabit AVB/TSN Ethernet Switch with
  two integrated 10/100/1000Base-T PHYs. In addition to the integrated PHYs,
  it supports up to 2 RGMII/RMII, up to 3 BASE-X/SERDES/2.5GBASE-X and up to
  2 Quad-SGMII/Quad-USGMII interfaces.

properties:
  $nodename:
    pattern: "^switch@[0-9a-f]+$"

  compatible:
    const: microchip,lan966x-switch

  reg:
    items:
      - description: cpu target
      - description: general control block target

  reg-names:
    items:
      - const: cpu
      - const: gcb

  interrupts:
    minItems: 1
    items:
      - description: register based extraction
      - description: frame dma based extraction

  interrupt-names:
    minItems: 1
    items:
      - const: xtr
      - const: fdma

  resets:
    items:
      - description: Reset controller used for switch core reset (soft reset)
      - description: Reset controller used for releasing the phy from reset

  reset-names:
    items:
      - const: switch
      - const: phy

  ethernet-ports:
    type: object
    patternProperties:
      "^port@[0-9a-f]+$":
        type: object

        allOf:
          - $ref: "http://devicetree.org/schemas/net/ethernet-controller.yaml#"

        properties:
          '#address-cells':
            const: 1
          '#size-cells':
            const: 0

          reg:
            description:
              Switch port number

          phys:
            description:
              Phandle of an Ethernet SerDes PHY

          phy-mode:
            description:
              This specifies the interface used by the Ethernet SerDes towards
              the PHY or SFP.
            enum:
              - gmii
              - sgmii
              - qsgmii
              - 1000base-x
              - 2500base-x

          phy-handle:
            description:
              Phandle of an Ethernet PHY.

          sfp:
            description:
              Phandle of an SFP.

          managed: true

        required:
          - reg
          - phys
          - phy-mode

        oneOf:
          - required:
              - phy-handle
          - required:
              - sfp
              - managed

required:
  - compatible
  - reg
  - reg-names
  - interrupts
  - interrupt-names
  - resets
  - reset-names
  - ethernet-ports

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    switch: switch@e0000000 {
      compatible = "microchip,lan966x-switch";
      reg = <0xe0000000 0x0100000>,
            <0xe2000000 0x0800000>;
      reg-names = "cpu", "gcb";
      interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
      interrupt-names = "xtr";
      resets = <&switch_reset 0>, <&phy_reset 0>;
      reset-names = "switch", "phy";

      ethernet-ports {
        #address-cells = <1>;
        #size-cells = <0>;

        port0: port@0 {
          reg = <0>;
          phy-handle = <&phy0>;
          phys = <&serdes 0 0>;
          phy-mode = "gmii";
        };

        port1: port@1 {
          reg = <1>;
          sfp = <&sfp_eth1>;
          managed = "in-band-status";
          phys = <&serdes 2 4>;
          phy-mode = "sgmii";
        };
      };
    };

...

MAINTAINERS

@@ -12524,6 +12524,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan743x_*

MICROCHIP LAN966X ETHERNET DRIVER
M: Horatiu Vultur <horatiu.vultur@microchip.com>
M: UNGLinuxDriver@microchip.com
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan966x/*

MICROCHIP LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-fbdev@vger.kernel.org

drivers/net/ethernet/microchip/Kconfig

@@ -55,6 +55,7 @@ config LAN743X
To compile this driver as a module, choose M here. The module will be
called lan743x.
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
endif # NET_VENDOR_MICROCHIP

drivers/net/ethernet/microchip/Makefile

@@ -9,4 +9,5 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/

drivers/net/ethernet/microchip/lan966x/Kconfig

@@ -0,0 +1,7 @@
config LAN966X_SWITCH
tristate "Lan966x switch driver"
depends on HAS_IOMEM
depends on OF
select PHYLINK
help
This driver supports the Lan966x network switch device.

drivers/net/ethernet/microchip/lan966x/Makefile

@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the Microchip Lan966x network device drivers.
#
obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o

drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c

@@ -0,0 +1,682 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/netdevice.h>
#include "lan966x_main.h"
/* Number of traffic classes */
#define LAN966X_NUM_TC 8
#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
static const struct lan966x_stat_layout lan966x_stats_layout[] = {
{ .name = "rx_octets", .offset = 0x00, },
{ .name = "rx_unicast", .offset = 0x01, },
{ .name = "rx_multicast", .offset = 0x02 },
{ .name = "rx_broadcast", .offset = 0x03 },
{ .name = "rx_short", .offset = 0x04 },
{ .name = "rx_frag", .offset = 0x05 },
{ .name = "rx_jabber", .offset = 0x06 },
{ .name = "rx_crc", .offset = 0x07 },
{ .name = "rx_symbol_err", .offset = 0x08 },
{ .name = "rx_sz_64", .offset = 0x09 },
{ .name = "rx_sz_65_127", .offset = 0x0a},
{ .name = "rx_sz_128_255", .offset = 0x0b},
{ .name = "rx_sz_256_511", .offset = 0x0c },
{ .name = "rx_sz_512_1023", .offset = 0x0d },
{ .name = "rx_sz_1024_1526", .offset = 0x0e },
{ .name = "rx_sz_jumbo", .offset = 0x0f },
{ .name = "rx_pause", .offset = 0x10 },
{ .name = "rx_control", .offset = 0x11 },
{ .name = "rx_long", .offset = 0x12 },
{ .name = "rx_cat_drop", .offset = 0x13 },
{ .name = "rx_red_prio_0", .offset = 0x14 },
{ .name = "rx_red_prio_1", .offset = 0x15 },
{ .name = "rx_red_prio_2", .offset = 0x16 },
{ .name = "rx_red_prio_3", .offset = 0x17 },
{ .name = "rx_red_prio_4", .offset = 0x18 },
{ .name = "rx_red_prio_5", .offset = 0x19 },
{ .name = "rx_red_prio_6", .offset = 0x1a },
{ .name = "rx_red_prio_7", .offset = 0x1b },
{ .name = "rx_yellow_prio_0", .offset = 0x1c },
{ .name = "rx_yellow_prio_1", .offset = 0x1d },
{ .name = "rx_yellow_prio_2", .offset = 0x1e },
{ .name = "rx_yellow_prio_3", .offset = 0x1f },
{ .name = "rx_yellow_prio_4", .offset = 0x20 },
{ .name = "rx_yellow_prio_5", .offset = 0x21 },
{ .name = "rx_yellow_prio_6", .offset = 0x22 },
{ .name = "rx_yellow_prio_7", .offset = 0x23 },
{ .name = "rx_green_prio_0", .offset = 0x24 },
{ .name = "rx_green_prio_1", .offset = 0x25 },
{ .name = "rx_green_prio_2", .offset = 0x26 },
{ .name = "rx_green_prio_3", .offset = 0x27 },
{ .name = "rx_green_prio_4", .offset = 0x28 },
{ .name = "rx_green_prio_5", .offset = 0x29 },
{ .name = "rx_green_prio_6", .offset = 0x2a },
{ .name = "rx_green_prio_7", .offset = 0x2b },
{ .name = "rx_assembly_err", .offset = 0x2c },
{ .name = "rx_smd_err", .offset = 0x2d },
{ .name = "rx_assembly_ok", .offset = 0x2e },
{ .name = "rx_merge_frag", .offset = 0x2f },
{ .name = "rx_pmac_octets", .offset = 0x30, },
{ .name = "rx_pmac_unicast", .offset = 0x31, },
{ .name = "rx_pmac_multicast", .offset = 0x32 },
{ .name = "rx_pmac_broadcast", .offset = 0x33 },
{ .name = "rx_pmac_short", .offset = 0x34 },
{ .name = "rx_pmac_frag", .offset = 0x35 },
{ .name = "rx_pmac_jabber", .offset = 0x36 },
{ .name = "rx_pmac_crc", .offset = 0x37 },
{ .name = "rx_pmac_symbol_err", .offset = 0x38 },
{ .name = "rx_pmac_sz_64", .offset = 0x39 },
{ .name = "rx_pmac_sz_65_127", .offset = 0x3a },
{ .name = "rx_pmac_sz_128_255", .offset = 0x3b },
{ .name = "rx_pmac_sz_256_511", .offset = 0x3c },
{ .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
{ .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
{ .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
{ .name = "rx_pmac_pause", .offset = 0x40 },
{ .name = "rx_pmac_control", .offset = 0x41 },
{ .name = "rx_pmac_long", .offset = 0x42 },
{ .name = "tx_octets", .offset = 0x80, },
{ .name = "tx_unicast", .offset = 0x81, },
{ .name = "tx_multicast", .offset = 0x82 },
{ .name = "tx_broadcast", .offset = 0x83 },
{ .name = "tx_col", .offset = 0x84 },
{ .name = "tx_drop", .offset = 0x85 },
{ .name = "tx_pause", .offset = 0x86 },
{ .name = "tx_sz_64", .offset = 0x87 },
{ .name = "tx_sz_65_127", .offset = 0x88 },
{ .name = "tx_sz_128_255", .offset = 0x89 },
{ .name = "tx_sz_256_511", .offset = 0x8a },
{ .name = "tx_sz_512_1023", .offset = 0x8b },
{ .name = "tx_sz_1024_1526", .offset = 0x8c },
{ .name = "tx_sz_jumbo", .offset = 0x8d },
{ .name = "tx_yellow_prio_0", .offset = 0x8e },
{ .name = "tx_yellow_prio_1", .offset = 0x8f },
{ .name = "tx_yellow_prio_2", .offset = 0x90 },
{ .name = "tx_yellow_prio_3", .offset = 0x91 },
{ .name = "tx_yellow_prio_4", .offset = 0x92 },
{ .name = "tx_yellow_prio_5", .offset = 0x93 },
{ .name = "tx_yellow_prio_6", .offset = 0x94 },
{ .name = "tx_yellow_prio_7", .offset = 0x95 },
{ .name = "tx_green_prio_0", .offset = 0x96 },
{ .name = "tx_green_prio_1", .offset = 0x97 },
{ .name = "tx_green_prio_2", .offset = 0x98 },
{ .name = "tx_green_prio_3", .offset = 0x99 },
{ .name = "tx_green_prio_4", .offset = 0x9a },
{ .name = "tx_green_prio_5", .offset = 0x9b },
{ .name = "tx_green_prio_6", .offset = 0x9c },
{ .name = "tx_green_prio_7", .offset = 0x9d },
{ .name = "tx_aged", .offset = 0x9e },
{ .name = "tx_llct", .offset = 0x9f },
{ .name = "tx_ct", .offset = 0xa0 },
{ .name = "tx_mm_hold", .offset = 0xa1 },
{ .name = "tx_merge_frag", .offset = 0xa2 },
{ .name = "tx_pmac_octets", .offset = 0xa3, },
{ .name = "tx_pmac_unicast", .offset = 0xa4, },
{ .name = "tx_pmac_multicast", .offset = 0xa5 },
{ .name = "tx_pmac_broadcast", .offset = 0xa6 },
{ .name = "tx_pmac_pause", .offset = 0xa7 },
{ .name = "tx_pmac_sz_64", .offset = 0xa8 },
{ .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
{ .name = "tx_pmac_sz_128_255", .offset = 0xaa },
{ .name = "tx_pmac_sz_256_511", .offset = 0xab },
{ .name = "tx_pmac_sz_512_1023", .offset = 0xac },
{ .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
{ .name = "tx_pmac_sz_jumbo", .offset = 0xae },
{ .name = "dr_local", .offset = 0x100 },
{ .name = "dr_tail", .offset = 0x101 },
{ .name = "dr_yellow_prio_0", .offset = 0x102 },
{ .name = "dr_yellow_prio_1", .offset = 0x103 },
{ .name = "dr_yellow_prio_2", .offset = 0x104 },
{ .name = "dr_yellow_prio_3", .offset = 0x105 },
{ .name = "dr_yellow_prio_4", .offset = 0x106 },
{ .name = "dr_yellow_prio_5", .offset = 0x107 },
{ .name = "dr_yellow_prio_6", .offset = 0x108 },
{ .name = "dr_yellow_prio_7", .offset = 0x109 },
{ .name = "dr_green_prio_0", .offset = 0x10a },
{ .name = "dr_green_prio_1", .offset = 0x10b },
{ .name = "dr_green_prio_2", .offset = 0x10c },
{ .name = "dr_green_prio_3", .offset = 0x10d },
{ .name = "dr_green_prio_4", .offset = 0x10e },
{ .name = "dr_green_prio_5", .offset = 0x10f },
{ .name = "dr_green_prio_6", .offset = 0x110 },
{ .name = "dr_green_prio_7", .offset = 0x111 },
};
/* The following numbers are indexes into lan966x_stats_layout[] */
#define SYS_COUNT_RX_OCT 0
#define SYS_COUNT_RX_UC 1
#define SYS_COUNT_RX_MC 2
#define SYS_COUNT_RX_BC 3
#define SYS_COUNT_RX_SHORT 4
#define SYS_COUNT_RX_FRAG 5
#define SYS_COUNT_RX_JABBER 6
#define SYS_COUNT_RX_CRC 7
#define SYS_COUNT_RX_SYMBOL_ERR 8
#define SYS_COUNT_RX_SZ_64 9
#define SYS_COUNT_RX_SZ_65_127 10
#define SYS_COUNT_RX_SZ_128_255 11
#define SYS_COUNT_RX_SZ_256_511 12
#define SYS_COUNT_RX_SZ_512_1023 13
#define SYS_COUNT_RX_SZ_1024_1526 14
#define SYS_COUNT_RX_SZ_JUMBO 15
#define SYS_COUNT_RX_PAUSE 16
#define SYS_COUNT_RX_CONTROL 17
#define SYS_COUNT_RX_LONG 18
#define SYS_COUNT_RX_CAT_DROP 19
#define SYS_COUNT_RX_RED_PRIO_0 20
#define SYS_COUNT_RX_RED_PRIO_1 21
#define SYS_COUNT_RX_RED_PRIO_2 22
#define SYS_COUNT_RX_RED_PRIO_3 23
#define SYS_COUNT_RX_RED_PRIO_4 24
#define SYS_COUNT_RX_RED_PRIO_5 25
#define SYS_COUNT_RX_RED_PRIO_6 26
#define SYS_COUNT_RX_RED_PRIO_7 27
#define SYS_COUNT_RX_YELLOW_PRIO_0 28
#define SYS_COUNT_RX_YELLOW_PRIO_1 29
#define SYS_COUNT_RX_YELLOW_PRIO_2 30
#define SYS_COUNT_RX_YELLOW_PRIO_3 31
#define SYS_COUNT_RX_YELLOW_PRIO_4 32
#define SYS_COUNT_RX_YELLOW_PRIO_5 33
#define SYS_COUNT_RX_YELLOW_PRIO_6 34
#define SYS_COUNT_RX_YELLOW_PRIO_7 35
#define SYS_COUNT_RX_GREEN_PRIO_0 36
#define SYS_COUNT_RX_GREEN_PRIO_1 37
#define SYS_COUNT_RX_GREEN_PRIO_2 38
#define SYS_COUNT_RX_GREEN_PRIO_3 39
#define SYS_COUNT_RX_GREEN_PRIO_4 40
#define SYS_COUNT_RX_GREEN_PRIO_5 41
#define SYS_COUNT_RX_GREEN_PRIO_6 42
#define SYS_COUNT_RX_GREEN_PRIO_7 43
#define SYS_COUNT_RX_ASSEMBLY_ERR 44
#define SYS_COUNT_RX_SMD_ERR 45
#define SYS_COUNT_RX_ASSEMBLY_OK 46
#define SYS_COUNT_RX_MERGE_FRAG 47
#define SYS_COUNT_RX_PMAC_OCT 48
#define SYS_COUNT_RX_PMAC_UC 49
#define SYS_COUNT_RX_PMAC_MC 50
#define SYS_COUNT_RX_PMAC_BC 51
#define SYS_COUNT_RX_PMAC_SHORT 52
#define SYS_COUNT_RX_PMAC_FRAG 53
#define SYS_COUNT_RX_PMAC_JABBER 54
#define SYS_COUNT_RX_PMAC_CRC 55
#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
#define SYS_COUNT_RX_PMAC_SZ_64 57
#define SYS_COUNT_RX_PMAC_SZ_65_127 58
#define SYS_COUNT_RX_PMAC_SZ_128_255 59
#define SYS_COUNT_RX_PMAC_SZ_256_511 60
#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
#define SYS_COUNT_RX_PMAC_PAUSE 64
#define SYS_COUNT_RX_PMAC_CONTROL 65
#define SYS_COUNT_RX_PMAC_LONG 66
#define SYS_COUNT_TX_OCT 67
#define SYS_COUNT_TX_UC 68
#define SYS_COUNT_TX_MC 69
#define SYS_COUNT_TX_BC 70
#define SYS_COUNT_TX_COL 71
#define SYS_COUNT_TX_DROP 72
#define SYS_COUNT_TX_PAUSE 73
#define SYS_COUNT_TX_SZ_64 74
#define SYS_COUNT_TX_SZ_65_127 75
#define SYS_COUNT_TX_SZ_128_255 76
#define SYS_COUNT_TX_SZ_256_511 77
#define SYS_COUNT_TX_SZ_512_1023 78
#define SYS_COUNT_TX_SZ_1024_1526 79
#define SYS_COUNT_TX_SZ_JUMBO 80
#define SYS_COUNT_TX_YELLOW_PRIO_0 81
#define SYS_COUNT_TX_YELLOW_PRIO_1 82
#define SYS_COUNT_TX_YELLOW_PRIO_2 83
#define SYS_COUNT_TX_YELLOW_PRIO_3 84
#define SYS_COUNT_TX_YELLOW_PRIO_4 85
#define SYS_COUNT_TX_YELLOW_PRIO_5 86
#define SYS_COUNT_TX_YELLOW_PRIO_6 87
#define SYS_COUNT_TX_YELLOW_PRIO_7 88
#define SYS_COUNT_TX_GREEN_PRIO_0 89
#define SYS_COUNT_TX_GREEN_PRIO_1 90
#define SYS_COUNT_TX_GREEN_PRIO_2 91
#define SYS_COUNT_TX_GREEN_PRIO_3 92
#define SYS_COUNT_TX_GREEN_PRIO_4 93
#define SYS_COUNT_TX_GREEN_PRIO_5 94
#define SYS_COUNT_TX_GREEN_PRIO_6 95
#define SYS_COUNT_TX_GREEN_PRIO_7 96
#define SYS_COUNT_TX_AGED 97
#define SYS_COUNT_TX_LLCT 98
#define SYS_COUNT_TX_CT 99
#define SYS_COUNT_TX_MM_HOLD 100
#define SYS_COUNT_TX_MERGE_FRAG 101
#define SYS_COUNT_TX_PMAC_OCT 102
#define SYS_COUNT_TX_PMAC_UC 103
#define SYS_COUNT_TX_PMAC_MC 104
#define SYS_COUNT_TX_PMAC_BC 105
#define SYS_COUNT_TX_PMAC_PAUSE 106
#define SYS_COUNT_TX_PMAC_SZ_64 107
#define SYS_COUNT_TX_PMAC_SZ_65_127 108
#define SYS_COUNT_TX_PMAC_SZ_128_255 109
#define SYS_COUNT_TX_PMAC_SZ_256_511 110
#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
#define SYS_COUNT_DR_LOCAL 114
#define SYS_COUNT_DR_TAIL 115
#define SYS_COUNT_DR_YELLOW_PRIO_0 116
#define SYS_COUNT_DR_YELLOW_PRIO_1 117
#define SYS_COUNT_DR_YELLOW_PRIO_2 118
#define SYS_COUNT_DR_YELLOW_PRIO_3 119
#define SYS_COUNT_DR_YELLOW_PRIO_4 120
#define SYS_COUNT_DR_YELLOW_PRIO_5 121
#define SYS_COUNT_DR_YELLOW_PRIO_6 122
#define SYS_COUNT_DR_YELLOW_PRIO_7 123
#define SYS_COUNT_DR_GREEN_PRIO_0 124
#define SYS_COUNT_DR_GREEN_PRIO_1 125
#define SYS_COUNT_DR_GREEN_PRIO_2 126
#define SYS_COUNT_DR_GREEN_PRIO_3 127
#define SYS_COUNT_DR_GREEN_PRIO_4 128
#define SYS_COUNT_DR_GREEN_PRIO_5 129
#define SYS_COUNT_DR_GREEN_PRIO_6 130
#define SYS_COUNT_DR_GREEN_PRIO_7 131
/* Add a possibly wrapping 32 bit value to a 64 bit counter */
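/* Example (illustrative values): if the low 32 bits of *cnt were 0xfffffff0
 * and the hardware counter now reads val = 0x00000010, val is smaller than
 * the stored low word, so the 32-bit hardware counter must have wrapped;
 * the function then adds 1ULL << 32 before replacing the low word, keeping
 * the 64-bit software counter monotonic.
 */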
static void lan966x_add_cnt(u64 *cnt, u32 val)
{
if (val < (*cnt & U32_MAX))
*cnt += (u64)1 << 32; /* value has wrapped */
*cnt = (*cnt & ~(u64)U32_MAX) + val;
}
static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
mutex_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
lan966x, SYS_STAT_CFG);
for (j = 0; j < lan966x->num_stats; j++) {
u32 offset = lan966x->stats_layout[j].offset;
lan966x_add_cnt(&lan966x->stats[idx++],
lan_rd(lan966x, SYS_CNT(offset)));
}
}
mutex_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
return lan966x->num_stats;
}
static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
struct lan966x_port *port = netdev_priv(netdev);
struct lan966x *lan966x = port->lan966x;
int i;
if (sset != ETH_SS_STATS)
return;
for (i = 0; i < lan966x->num_stats; i++)
memcpy(data + i * ETH_GSTRING_LEN,
lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
}
static void lan966x_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
int i;
/* check and update now */
lan966x_stats_update(lan966x);
/* Copy all counters */
for (i = 0; i < lan966x->num_stats; i++)
*data++ = lan966x->stats[port->chip_port *
lan966x->num_stats + i];
}
static void lan966x_get_eth_mac_stats(struct net_device *dev,
struct ethtool_eth_mac_stats *mac_stats)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
u32 idx;
lan966x_stats_update(lan966x);
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
lan966x->stats[idx + SYS_COUNT_TX_MC] +
lan966x->stats[idx + SYS_COUNT_TX_BC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
mac_stats->SingleCollisionFrames =
lan966x->stats[idx + SYS_COUNT_TX_COL];
mac_stats->MultipleCollisionFrames = 0;
mac_stats->FramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_UC] +
lan966x->stats[idx + SYS_COUNT_RX_MC] +
lan966x->stats[idx + SYS_COUNT_RX_BC];
mac_stats->FrameCheckSequenceErrors =
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_CRC];
mac_stats->AlignmentErrors = 0;
mac_stats->OctetsTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_OCT] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
mac_stats->FramesWithDeferredXmissions =
lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
mac_stats->LateCollisions = 0;
mac_stats->FramesAbortedDueToXSColls = 0;
mac_stats->FramesLostDueToIntMACXmitError = 0;
mac_stats->CarrierSenseErrors = 0;
mac_stats->OctetsReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_OCT];
mac_stats->FramesLostDueToIntMACRcvError = 0;
mac_stats->MulticastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_MC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
mac_stats->BroadcastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_BC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
mac_stats->FramesWithExcessiveDeferral = 0;
mac_stats->MulticastFramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_MC];
mac_stats->BroadcastFramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_BC];
mac_stats->InRangeLengthErrors =
lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
mac_stats->OutOfRangeLengthField =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
mac_stats->FrameTooLongErrors =
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
mutex_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
{ 0, 64 },
{ 65, 127 },
{ 128, 255 },
{ 256, 511 },
{ 512, 1023 },
{ 1024, 1518 },
{ 1519, 10239 },
{}
};
static void lan966x_get_eth_rmon_stats(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
u32 idx;
lan966x_stats_update(lan966x);
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
rmon_stats->oversize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
rmon_stats->fragments =
lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
rmon_stats->jabbers =
lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
rmon_stats->hist[0] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
rmon_stats->hist[1] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
rmon_stats->hist[2] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
rmon_stats->hist[3] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
rmon_stats->hist[4] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
rmon_stats->hist[5] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
rmon_stats->hist[6] =
lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
rmon_stats->hist_tx[0] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
rmon_stats->hist_tx[1] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
rmon_stats->hist_tx[2] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
rmon_stats->hist_tx[3] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
rmon_stats->hist_tx[4] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
rmon_stats->hist_tx[5] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
rmon_stats->hist_tx[6] =
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
mutex_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
static int lan966x_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct lan966x_port *port = netdev_priv(ndev);
return phylink_ethtool_ksettings_get(port->phylink, cmd);
}
static int lan966x_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd)
{
struct lan966x_port *port = netdev_priv(ndev);
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
static void lan966x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan966x_port *port = netdev_priv(dev);
phylink_ethtool_get_pauseparam(port->phylink, pause);
}
static int lan966x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan966x_port *port = netdev_priv(dev);
return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
const struct ethtool_ops lan966x_ethtool_ops = {
.get_link_ksettings = lan966x_get_link_ksettings,
.set_link_ksettings = lan966x_set_link_ksettings,
.get_pauseparam = lan966x_get_pauseparam,
.set_pauseparam = lan966x_set_pauseparam,
.get_sset_count = lan966x_get_sset_count,
.get_strings = lan966x_get_strings,
.get_ethtool_stats = lan966x_get_ethtool_stats,
.get_eth_mac_stats = lan966x_get_eth_mac_stats,
.get_rmon_stats = lan966x_get_eth_rmon_stats,
.get_link = ethtool_op_get_link,
};
static void lan966x_check_stats_work(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct lan966x *lan966x = container_of(del_work, struct lan966x,
stats_work);
lan966x_stats_update(lan966x);
queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
LAN966X_STATS_CHECK_DELAY);
}
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
u32 idx;
int i;
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
lan966x->stats[idx + SYS_COUNT_RX_LONG];
stats->rx_dropped = dev->stats.rx_dropped +
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
lan966x->stats[idx + SYS_COUNT_DR_TAIL];
for (i = 0; i < LAN966X_NUM_TC; i++) {
stats->rx_dropped +=
(lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
}
/* Get Tx stats */
stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
lan966x->stats[idx + SYS_COUNT_TX_AGED];
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
mutex_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
{
char queue_name[32];
lan966x->stats_layout = lan966x_stats_layout;
lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
lan966x->num_stats,
sizeof(u64), GFP_KERNEL);
if (!lan966x->stats)
return -ENOMEM;
/* Init stats worker */
mutex_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
LAN966X_STATS_CHECK_DELAY);
return 0;
}

drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h

@@ -0,0 +1,173 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __LAN966X_IFH_H__
#define __LAN966X_IFH_H__
/* Fields with description (*) should just be cleared upon injection
* IFH is transmitted MSByte first (Highest bit pos sent as MSB of first byte)
*/
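/* For orientation (derived from the convention above): the IFH is
 * IFH_LEN * 4 = 28 bytes, i.e. bit positions 223..0. IFH_POS_TIMESTAMP (192)
 * with IFH_WID_TIMESTAMP (32) therefore occupies bits 223..192, i.e. the
 * first four bytes on the wire, and IFH_POS_BYPASS (191) is the most
 * significant bit of the fifth byte.
 */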
#define IFH_LEN 7
/* Timestamp for frame */
#define IFH_POS_TIMESTAMP 192
/* Bypass analyzer with a prefilled IFH */
#define IFH_POS_BYPASS 191
/* Masqueraded injection with masq_port defining logical source port */
#define IFH_POS_MASQ 190
/* Masqueraded port number for injection */
#define IFH_POS_MASQ_PORT 186
/* Frame length (*) */
#define IFH_POS_LEN 178
/* Cell filling mode. Full(0),Etype(1), LlctOpt(2), Llct(3) */
#define IFH_POS_WRDMODE 176
/* Frame has 16 bits rtag removed compared to line data */
#define IFH_POS_RTAG48 175
/* Frame has a redundancy tag */
#define IFH_POS_HAS_RED_TAG 174
/* Frame has been cut through forwarded (*) */
#define IFH_POS_CUTTHRU 173
/* Rewriter command */
#define IFH_POS_REW_CMD 163
/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
#define IFH_POS_REW_OAM 162
/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
* 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
*/
#define IFH_POS_PDU_TYPE 158
/* Update FCS before transmission */
#define IFH_POS_FCS_UPD 157
/* Classified DSCP value of frame */
#define IFH_POS_DSCP 151
/* Yellow indication */
#define IFH_POS_DP 150
/* Process in RTE/inbound */
#define IFH_POS_RTE_INB_UPDATE 149
/* Number of tags to pop from frame */
#define IFH_POS_POP_CNT 147
/* Number of tags in front of the ethertype */
#define IFH_POS_ETYPE_OFS 145
/* Logical source port of frame (*) */
#define IFH_POS_SRCPORT 141
/* Sequence number in redundancy tag */
#define IFH_POS_SEQ_NUM 120
/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */
#define IFH_POS_TCI 103
/* Classified internal priority for queuing */
#define IFH_POS_QOS_CLASS 100
/* Bit mask with eight cpu copy classes */
#define IFH_POS_CPUQ 92
/* Relearn + learn flags (*) */
#define IFH_POS_LEARN_FLAGS 90
/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
#define IFH_POS_SFLOW_ID 86
/* Set if an ACL/S2 rule was hit (*).
* Super priority: acl_hit=0 and acl_hit(4)=1.
*/
#define IFH_POS_ACL_HIT 85
/* S2 rule index hit (*) */
#define IFH_POS_ACL_IDX 79
/* ISDX as classified by S1 */
#define IFH_POS_ISDX 71
/* Destination ports for frame */
#define IFH_POS_DSTS 62
/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
#define IFH_POS_FLOOD 60
/* Redundancy tag operation */
#define IFH_POS_SEQ_OP 58
/* Classified internal priority for resourcemgt, tagging etc */
#define IFH_POS_IPV 55
/* Frame is for AFI use */
#define IFH_POS_AFI 54
/* Internal aging value (*) */
#define IFH_POS_AGED 52
/* RTP Identifier */
#define IFH_POS_RTP_ID 42
/* RTP MRPD flow */
#define IFH_POS_RTP_SUBID 41
/* Profinet DataStatus or opcua GroupVersion MSB */
#define IFH_POS_PN_DATA_STATUS 33
/* Profinet transfer status (1 iff the status is 0) */
#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
/* Profinet cycle counter or opcua NetworkMessageNumber */
#define IFH_POS_PN_CC 16
#define IFH_WID_TIMESTAMP 32
#define IFH_WID_BYPASS 1
#define IFH_WID_MASQ 1
#define IFH_WID_MASQ_PORT 4
#define IFH_WID_LEN 14
#define IFH_WID_WRDMODE 2
#define IFH_WID_RTAG48 1
#define IFH_WID_HAS_RED_TAG 1
#define IFH_WID_CUTTHRU 1
#define IFH_WID_REW_CMD 10
#define IFH_WID_REW_OAM 1
#define IFH_WID_PDU_TYPE 4
#define IFH_WID_FCS_UPD 1
#define IFH_WID_DSCP 6
#define IFH_WID_DP 1
#define IFH_WID_RTE_INB_UPDATE 1
#define IFH_WID_POP_CNT 2
#define IFH_WID_ETYPE_OFS 2
#define IFH_WID_SRCPORT 4
#define IFH_WID_SEQ_NUM 16
#define IFH_WID_TCI 17
#define IFH_WID_QOS_CLASS 3
#define IFH_WID_CPUQ 8
#define IFH_WID_LEARN_FLAGS 2
#define IFH_WID_SFLOW_ID 4
#define IFH_WID_ACL_HIT 1
#define IFH_WID_ACL_IDX 6
#define IFH_WID_ISDX 8
#define IFH_WID_DSTS 9
#define IFH_WID_FLOOD 2
#define IFH_WID_SEQ_OP 2
#define IFH_WID_IPV 3
#define IFH_WID_AFI 1
#define IFH_WID_AGED 2
#define IFH_WID_RTP_ID 10
#define IFH_WID_RTP_SUBID 1
#define IFH_WID_PN_DATA_STATUS 8
#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
#define IFH_WID_PN_CC 16
#endif /* __LAN966X_IFH_H__ */

drivers/net/ethernet/microchip/lan966x/lan966x_mac.c

@@ -0,0 +1,101 @@
// SPDX-License-Identifier: GPL-2.0+
#include "lan966x_main.h"
#define LAN966X_MAC_COLUMNS 4
#define MACACCESS_CMD_IDLE 0
#define MACACCESS_CMD_LEARN 1
#define MACACCESS_CMD_FORGET 2
#define MACACCESS_CMD_AGE 3
#define MACACCESS_CMD_GET_NEXT 4
#define MACACCESS_CMD_INIT 5
#define MACACCESS_CMD_READ 6
#define MACACCESS_CMD_WRITE 7
#define MACACCESS_CMD_SYNC_GET_NEXT 8
static int lan966x_mac_get_status(struct lan966x *lan966x)
{
return lan_rd(lan966x, ANA_MACACCESS);
}
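/* Poll ANA_MACACCESS until the MAC_TABLE_CMD field returns to
 * MACACCESS_CMD_IDLE, sleeping TABLE_UPDATE_SLEEP_US between reads and
 * giving up after TABLE_UPDATE_TIMEOUT_US.
 */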
static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
{
u32 val;
return readx_poll_timeout(lan966x_mac_get_status,
lan966x, val,
(ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
MACACCESS_CMD_IDLE,
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static void lan966x_mac_select(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid)
{
u32 macl = 0, mach = 0;
/* Set the MAC address to handle and the vlan associated in a format
* understood by the hardware.
*/
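/* For example (illustrative values): MAC 00:11:22:33:44:55 with vid 1
 * becomes mach = 0x00010011 and macl = 0x22334455.
 */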
mach |= vid << 16;
mach |= mac[0] << 8;
mach |= mac[1] << 0;
macl |= mac[2] << 24;
macl |= mac[3] << 16;
macl |= mac[4] << 8;
macl |= mac[5] << 0;
lan_wr(macl, lan966x, ANA_MACLDATA);
lan_wr(mach, lan966x, ANA_MACHDATA);
}
int lan966x_mac_learn(struct lan966x *lan966x, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
{
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
lan_wr(ANA_MACACCESS_VALID_SET(1) |
ANA_MACACCESS_CHANGE2SW_SET(0) |
ANA_MACACCESS_DEST_IDX_SET(port) |
ANA_MACACCESS_ENTRYTYPE_SET(type) |
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
return lan966x_mac_wait_for_completion(lan966x);
}
int lan966x_mac_forget(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type)
{
lan966x_mac_select(lan966x, mac, vid);
/* Issue a forget command */
lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
lan966x, ANA_MACACCESS);
return lan966x_mac_wait_for_completion(lan966x);
}
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
}
int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
{
return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
}
void lan966x_mac_init(struct lan966x *lan966x)
{
/* Clear the MAC table */
lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
lan966x_mac_wait_for_completion(lan966x);
}

drivers/net/ethernet/microchip/lan966x/lan966x_main.c

@@ -0,0 +1,946 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/packing.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
#include "lan966x_main.h"
#define XTR_EOF_0 0x00000080U
#define XTR_EOF_1 0x01000080U
#define XTR_EOF_2 0x02000080U
#define XTR_EOF_3 0x03000080U
#define XTR_PRUNED 0x04000080U
#define XTR_ABORT 0x05000080U
#define XTR_ESCAPE 0x06000080U
#define XTR_NOT_READY 0x07000080U
#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
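/* For the EOF words above, XTR_VALID_BYTES() recovers how many bytes of the
 * last data word are valid, e.g. XTR_EOF_0 -> 4 bytes, XTR_EOF_1 -> 3 bytes,
 * XTR_EOF_3 -> 1 byte.
 */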
#define READL_SLEEP_US 10
#define READL_TIMEOUT_US 100000000
#define IO_RANGES 2
static const struct of_device_id lan966x_match[] = {
{ .compatible = "microchip,lan966x-switch" },
{ }
};
MODULE_DEVICE_TABLE(of, lan966x_match);
struct lan966x_main_io_resource {
enum lan966x_target id;
phys_addr_t offset;
int range;
};
static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
{ TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
{ TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
{ TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
{ TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
{ TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
{ TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
{ TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
{ TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
{ TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
{ TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
{ TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
{ TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
{ TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
{ TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
};
static int lan966x_create_targets(struct platform_device *pdev,
struct lan966x *lan966x)
{
struct resource *iores[IO_RANGES];
void __iomem *begin[IO_RANGES];
int idx;
/* Initially map the entire range and after that update each target to
* point inside the region at the correct offset. It is possible that
* other devices access the same region so don't add any checks about
* this.
*/
for (idx = 0; idx < IO_RANGES; idx++) {
iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
idx);
if (!iores[idx]) {
dev_err(&pdev->dev, "Invalid resource\n");
return -EINVAL;
}
begin[idx] = devm_ioremap(&pdev->dev,
iores[idx]->start,
resource_size(iores[idx]));
if (IS_ERR(begin[idx])) {
dev_err(&pdev->dev, "Unable to get registers: %s\n",
iores[idx]->name);
return PTR_ERR(begin[idx]);
}
}
for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
const struct lan966x_main_io_resource *iomap =
&lan966x_main_iomap[idx];
lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
}
return 0;
}
static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
const struct sockaddr *addr = p;
int ret;
/* Learn the new net device MAC address in the mac table. */
ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, port->pvid);
if (ret)
return ret;
/* Then forget the previous one. */
ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, port->pvid);
if (ret)
return ret;
eth_hw_addr_set(dev, addr->sa_data);
return ret;
}
static int lan966x_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct lan966x_port *port = netdev_priv(dev);
int ret;
ret = snprintf(buf, len, "p%d", port->chip_port);
if (ret >= len)
return -EINVAL;
return 0;
}
static int lan966x_port_open(struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
int err;
/* Enable receiving frames on the port, and activate auto-learning of
* MAC addresses.
*/
lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
ANA_PORT_CFG_RECV_ENA_SET(1) |
ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
ANA_PORT_CFG_LEARNAUTO |
ANA_PORT_CFG_RECV_ENA |
ANA_PORT_CFG_PORTID_VAL,
lan966x, ANA_PORT_CFG(port->chip_port));
err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
}
phylink_start(port->phylink);
return 0;
}
static int lan966x_port_stop(struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
lan966x_port_config_down(port);
phylink_stop(port->phylink);
phylink_disconnect_phy(port->phylink);
return 0;
}
static int lan966x_port_inj_status(struct lan966x *lan966x)
{
return lan_rd(lan966x, QS_INJ_STATUS);
}
static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
return readx_poll_timeout(lan966x_port_inj_status, lan966x, val,
QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
READL_SLEEP_US, READL_TIMEOUT_US);
}
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
__be32 *ifh,
struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
u32 i, count, last;
u8 grp = 0;
u32 val;
int err;
val = lan_rd(lan966x, QS_INJ_STATUS);
if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
(QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
return NETDEV_TX_BUSY;
/* Write start of frame */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
QS_INJ_CTRL_SOF_SET(1),
lan966x, QS_INJ_CTRL(grp));
/* Write IFH header */
for (i = 0; i < IFH_LEN; ++i) {
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
return NETDEV_TX_BUSY;
lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
}
/* Write frame */
count = DIV_ROUND_UP(skb->len, 4);
last = skb->len % 4;
for (i = 0; i < count; ++i) {
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
return NETDEV_TX_BUSY;
lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
}
/* Add padding */
while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
return NETDEV_TX_BUSY;
lan_wr(0, lan966x, QS_INJ_WR(grp));
++i;
}
/* Indicate EOF and valid bytes in the last word */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
0 : last) |
QS_INJ_CTRL_EOF_SET(1),
lan966x, QS_INJ_CTRL(grp));
/* Add dummy CRC */
lan_wr(0, lan966x, QS_INJ_WR(grp));
skb_tx_timestamp(skb);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
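/* The lan966x_ifh_set_*() helpers below use the generic packing() routine to
 * write a value into the IFH bit field that starts at IFH_POS_x and is
 * IFH_WID_x bits wide, within the IFH_LEN * 4 byte injection header.
 */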
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
static void lan966x_ifh_set_port(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
}
static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
}
static void lan966x_ifh_set_ipv(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1,
IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
}
static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
__be32 ifh[IFH_LEN];
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
lan966x_ifh_set_bypass(ifh, 1);
lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
return lan966x_port_ifh_xmit(skb, ifh, dev);
}
static void lan966x_set_promisc(struct lan966x_port *port, bool enable)
{
struct lan966x *lan966x = port->lan966x;
lan_rmw(ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(enable),
ANA_CPU_FWD_CFG_SRC_COPY_ENA,
lan966x, ANA_CPU_FWD_CFG(port->chip_port));
}
static void lan966x_port_change_rx_flags(struct net_device *dev, int flags)
{
struct lan966x_port *port = netdev_priv(dev);
if (!(flags & IFF_PROMISC))
return;
if (dev->flags & IFF_PROMISC)
lan966x_set_promisc(port, true);
else
lan966x_set_promisc(port, false);
}
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
dev->mtu = new_mtu;
return 0;
}
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
return lan966x_mac_forget(lan966x, addr, port->pvid, ENTRYTYPE_LOCKED);
}
static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
return lan966x_mac_cpu_learn(lan966x, addr, port->pvid);
}
static void lan966x_port_set_rx_mode(struct net_device *dev)
{
__dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
}
static int lan966x_port_get_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
ppid->id_len = sizeof(lan966x->base_mac);
memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
return 0;
}
static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_open = lan966x_port_open,
.ndo_stop = lan966x_port_stop,
.ndo_start_xmit = lan966x_port_xmit,
.ndo_change_rx_flags = lan966x_port_change_rx_flags,
.ndo_change_mtu = lan966x_port_change_mtu,
.ndo_set_rx_mode = lan966x_port_set_rx_mode,
.ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
.ndo_get_stats64 = lan966x_stats_get,
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
};
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
return lan_rd(lan966x, QS_XTR_RD(grp));
}
static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
return read_poll_timeout(lan966x_port_xtr_status, val,
val != XTR_NOT_READY,
READL_SLEEP_US, READL_TIMEOUT_US, false,
lan966x, grp);
}
static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
{
u32 bytes_valid;
u32 val;
int err;
val = lan_rd(lan966x, QS_XTR_RD(grp));
if (val == XTR_NOT_READY) {
err = lan966x_port_xtr_ready(lan966x, grp);
if (err)
return -EIO;
}
switch (val) {
case XTR_ABORT:
return -EIO;
case XTR_EOF_0:
case XTR_EOF_1:
case XTR_EOF_2:
case XTR_EOF_3:
case XTR_PRUNED:
bytes_valid = XTR_VALID_BYTES(val);
val = lan_rd(lan966x, QS_XTR_RD(grp));
if (val == XTR_ESCAPE)
*rval = lan_rd(lan966x, QS_XTR_RD(grp));
else
*rval = val;
return bytes_valid;
case XTR_ESCAPE:
*rval = lan_rd(lan966x, QS_XTR_RD(grp));
return 4;
default:
*rval = val;
return 4;
}
}
static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
}
static void lan966x_ifh_get_len(void *ifh, u64 *len)
{
packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
{
struct lan966x *lan966x = args;
int i, grp = 0, err = 0;
if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
return IRQ_NONE;
do {
struct net_device *dev;
struct sk_buff *skb;
int sz = 0, buf_len;
u64 src_port, len;
u32 ifh[IFH_LEN];
u32 *buf;
u32 val;
for (i = 0; i < IFH_LEN; i++) {
err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
if (err != 4)
goto recover;
}
err = 0;
lan966x_ifh_get_src_port(ifh, &src_port);
lan966x_ifh_get_len(ifh, &len);
WARN_ON(src_port >= lan966x->num_phys_ports);
dev = lan966x->ports[src_port]->dev;
skb = netdev_alloc_skb(dev, len);
if (unlikely(!skb)) {
netdev_err(dev, "Unable to allocate sk_buff\n");
err = -ENOMEM;
break;
}
buf_len = len - ETH_FCS_LEN;
buf = (u32 *)skb_put(skb, buf_len);
len = 0;
do {
sz = lan966x_rx_frame_word(lan966x, grp, &val);
if (sz < 0) {
kfree_skb(skb);
goto recover;
}
*buf++ = val;
len += sz;
} while (len < buf_len);
/* Read the FCS */
sz = lan966x_rx_frame_word(lan966x, grp, &val);
if (sz < 0) {
kfree_skb(skb);
goto recover;
}
/* Update the statistics if part of the FCS was read before */
len -= ETH_FCS_LEN - sz;
if (unlikely(dev->features & NETIF_F_RXFCS)) {
buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
*buf = val;
}
if (sz < 0) {
err = sz;
break;
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx_ni(skb);
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
recover:
if (sz < 0 || err)
lan_rd(lan966x, QS_XTR_RD(grp));
} while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
return IRQ_HANDLED;
}
static void lan966x_cleanup_ports(struct lan966x *lan966x)
{
struct lan966x_port *port;
int p;
for (p = 0; p < lan966x->num_phys_ports; p++) {
port = lan966x->ports[p];
if (!port)
continue;
if (port->dev)
unregister_netdev(port->dev);
if (port->phylink) {
rtnl_lock();
lan966x_port_stop(port->dev);
rtnl_unlock();
phylink_destroy(port->phylink);
port->phylink = NULL;
}
if (port->fwnode)
fwnode_handle_put(port->fwnode);
}
disable_irq(lan966x->xtr_irq);
lan966x->xtr_irq = -ENXIO;
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
phy_interface_t phy_mode,
struct fwnode_handle *portnp)
{
struct lan966x_port *port;
struct phylink *phylink;
struct net_device *dev;
int err;
if (p >= lan966x->num_phys_ports)
return -EINVAL;
dev = devm_alloc_etherdev_mqs(lan966x->dev,
sizeof(struct lan966x_port), 8, 1);
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, lan966x->dev);
port = netdev_priv(dev);
port->dev = dev;
port->lan966x = lan966x;
port->chip_port = p;
port->pvid = PORT_PVID;
lan966x->ports[p] = port;
dev->max_mtu = ETH_MAX_MTU;
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, port->pvid,
ENTRYTYPE_LOCKED);
port->phylink_config.dev = &port->dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
__set_bit(PHY_INTERFACE_MODE_MII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
port->phylink_config.supported_interfaces);
phylink = phylink_create(&port->phylink_config,
portnp,
phy_mode,
&lan966x_phylink_mac_ops);
if (IS_ERR(phylink)) {
port->dev = NULL;
return PTR_ERR(phylink);
}
port->phylink = phylink;
phylink_set_pcs(phylink, &port->phylink_pcs);
err = register_netdev(dev);
if (err) {
dev_err(lan966x->dev, "register_netdev failed\n");
return err;
}
return 0;
}
static void lan966x_init(struct lan966x *lan966x)
{
u32 p, i;
/* MAC table initialization */
lan966x_mac_init(lan966x);
/* Flush queues */
lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
GENMASK(1, 0),
lan966x, QS_XTR_FLUSH);
/* Allow to drain */
mdelay(1);
/* All Queues normal */
lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
~(GENMASK(1, 0)),
lan966x, QS_XTR_FLUSH);
/* Set MAC age time to default value, the entry is aged after
* 2 * AGE_PERIOD
*/
lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
lan966x, ANA_AUTOAGE);
/* Disable learning for frames discarded by VLAN ingress filtering */
lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
ANA_ADVLEARN_VLAN_CHK,
lan966x, ANA_ADVLEARN);
/* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
(20000000 / 65),
lan966x, SYS_FRM_AGING);
/* Map the 8 CPU extraction queues to CPU port */
lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
/* Do byte-swap and expect status after last data word
* Extraction: Mode: manual extraction | Byte_swap
*/
lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_XTR_GRP_CFG(0));
/* Injection: Mode: manual injection | Byte_swap */
lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
lan966x, QS_INJ_GRP_CFG(0));
lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
QS_INJ_CTRL_GAP_SIZE,
lan966x, QS_INJ_CTRL(0));
/* Enable IFH insertion/parsing on CPU ports */
lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
lan966x, SYS_PORT_MODE(CPU_PORT));
/* Setup flooding PGIDs */
lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
lan966x, ANA_FLOODING_IPMC);
/* There are 8 priorities */
for (i = 0; i < 8; ++i)
lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
ANA_FLOODING_FLD_MULTICAST |
ANA_FLOODING_FLD_BROADCAST,
lan966x, ANA_FLOODING(i));
for (i = 0; i < PGID_ENTRIES; ++i)
/* Set all the entries to obey VLAN_VLAN */
lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
ANA_PGID_CFG_OBEY_VLAN,
lan966x, ANA_PGID_CFG(i));
for (p = 0; p < lan966x->num_phys_ports; p++) {
/* Disable bridging by default */
lan_rmw(ANA_PGID_PGID_SET(0x0),
ANA_PGID_PGID,
lan966x, ANA_PGID(p + PGID_SRC));
/* Do not forward BPDU frames to the front ports and copy them
* to CPU
*/
lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
}
/* Set source buffer size for each priority and each port to 1500 bytes */
for (i = 0; i <= QSYS_Q_RSRV; ++i) {
lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
}
/* Enable switching to/from cpu port */
lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
/* Configure and enable the CPU port */
lan_rmw(ANA_PGID_PGID_SET(0),
ANA_PGID_PGID,
lan966x, ANA_PGID(CPU_PORT));
lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_CPU));
/* Multicast to all other ports */
lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MC));
/* This will be controlled by mrouter ports */
lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MCIPV4));
/* Broadcast to the CPU port and to other ports */
lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_BC));
lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
lan966x, REW_PORT_CFG(CPU_PORT));
lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
ANA_ANAINTR_INTR_ENA,
lan966x, ANA_ANAINTR);
}
static int lan966x_ram_init(struct lan966x *lan966x)
{
return lan_rd(lan966x, SYS_RAM_INIT);
}
static int lan966x_reset_switch(struct lan966x *lan966x)
{
struct reset_control *switch_reset, *phy_reset;
int val = 0;
int ret;
switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
if (IS_ERR(switch_reset))
return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
"Could not obtain switch reset");
phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
if (IS_ERR(phy_reset))
return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
"Could not obtain phy reset\n");
reset_control_reset(switch_reset);
reset_control_reset(phy_reset);
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
ret = readx_poll_timeout(lan966x_ram_init, lan966x,
val, (val & BIT(1)) == 0, READL_SLEEP_US,
READL_TIMEOUT_US);
if (ret)
return ret;
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
return 0;
}
static int lan966x_probe(struct platform_device *pdev)
{
struct fwnode_handle *ports, *portnp;
struct lan966x *lan966x;
u8 mac_addr[ETH_ALEN];
int err, i;
lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
if (!lan966x)
return -ENOMEM;
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
pr_info("MAC addr was not set, use random MAC\n");
eth_random_addr(lan966x->base_mac);
lan966x->base_mac[5] &= 0xf0;
}
ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
if (!ports)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
err = lan966x_create_targets(pdev, lan966x);
if (err)
return dev_err_probe(&pdev->dev, err,
"Failed to create targets");
err = lan966x_reset_switch(lan966x);
if (err)
return dev_err_probe(&pdev->dev, err, "Reset failed");
i = 0;
fwnode_for_each_available_child_node(ports, portnp)
++i;
lan966x->num_phys_ports = i;
lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
sizeof(struct lan966x_port *),
GFP_KERNEL);
if (!lan966x->ports)
return -ENOMEM;
/* The QS system has 32KB of memory */
lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
/* set irq */
lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
if (lan966x->xtr_irq <= 0)
return -EINVAL;
err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
lan966x_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", lan966x);
if (err) {
pr_err("Unable to use xtr irq");
return -ENODEV;
}
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
/* go over the child nodes */
fwnode_for_each_available_child_node(ports, portnp) {
phy_interface_t phy_mode;
struct phy *serdes;
u32 p;
if (fwnode_property_read_u32(portnp, "reg", &p))
continue;
phy_mode = fwnode_get_phy_mode(portnp);
err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
if (err)
goto cleanup_ports;
/* Read needed configuration */
lan966x->ports[p]->config.portmode = phy_mode;
lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
if (!IS_ERR(serdes))
lan966x->ports[p]->serdes = serdes;
lan966x_port_init(lan966x->ports[p]);
}
return 0;
cleanup_ports:
fwnode_handle_put(portnp);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
return err;
}
static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
lan966x_cleanup_ports(lan966x);
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
return 0;
}
static struct platform_driver lan966x_driver = {
.probe = lan966x_probe,
.remove = lan966x_remove,
.driver = {
.name = "lan966x-switch",
.of_match_table = lan966x_match,
},
};
module_platform_driver(lan966x_driver);
MODULE_DESCRIPTION("Microchip LAN966X switch driver");
MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
MODULE_LICENSE("Dual MIT/GPL");


@@ -0,0 +1,192 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __LAN966X_MAIN_H__
#define __LAN966X_MAIN_H__
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include "lan966x_regs.h"
#include "lan966x_ifh.h"
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
#define LAN966X_BUFFER_CELL_SZ 64
#define LAN966X_BUFFER_MEMORY (160 * 1024)
#define LAN966X_BUFFER_MIN_SZ 60
#define PGID_AGGR 64
#define PGID_SRC 80
#define PGID_ENTRIES 89
#define PORT_PVID 0
/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
#define QSYS_Q_RSRV 95
/* Reserved PGIDs */
#define PGID_CPU (PGID_AGGR - 6)
#define PGID_UC (PGID_AGGR - 5)
#define PGID_BC (PGID_AGGR - 4)
#define PGID_MC (PGID_AGGR - 3)
#define PGID_MCIPV4 (PGID_AGGR - 2)
#define PGID_MCIPV6 (PGID_AGGR - 1)
#define LAN966X_SPEED_NONE 0
#define LAN966X_SPEED_2500 1
#define LAN966X_SPEED_1000 1
#define LAN966X_SPEED_100 2
#define LAN966X_SPEED_10 3
#define CPU_PORT 8
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
* ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
* ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
*/
enum macaccess_entry_type {
ENTRYTYPE_NORMAL = 0,
ENTRYTYPE_LOCKED,
ENTRYTYPE_MACV4,
ENTRYTYPE_MACV6,
};
struct lan966x_port;
struct lan966x_stat_layout {
u32 offset;
char name[ETH_GSTRING_LEN];
};
struct lan966x {
struct device *dev;
u8 num_phys_ports;
struct lan966x_port **ports;
void __iomem *regs[NUM_TARGETS];
int shared_queue_sz;
u8 base_mac[ETH_ALEN];
/* stats */
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
/* workqueue for reading stats */
struct mutex stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
/* interrupts */
int xtr_irq;
};
struct lan966x_port_config {
phy_interface_t portmode;
const unsigned long *advertising;
int speed;
int duplex;
u32 pause;
bool inband;
bool autoneg;
};
struct lan966x_port {
struct net_device *dev;
struct lan966x *lan966x;
u8 chip_port;
u16 pvid;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
struct lan966x_port_config config;
struct phylink *phylink;
struct phy *serdes;
struct fwnode_handle *fwnode;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
void lan966x_port_status_get(struct lan966x_port *port,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
void lan966x_port_init(struct lan966x_port *port);
int lan966x_mac_learn(struct lan966x *lan966x, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type);
int lan966x_mac_forget(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type);
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
void lan966x_mac_init(struct lan966x *lan966x);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
int gcnt, int gwidth,
int raddr, int rinst,
int rcnt, int rwidth)
{
WARN_ON((tinst) >= tcnt);
WARN_ON((ginst) >= gcnt);
WARN_ON((rinst) >= rcnt);
return base[id + (tinst)] +
gbase + ((ginst) * gwidth) +
raddr + ((rinst) * rwidth);
}
static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
int gbase, int ginst, int gcnt, int gwidth,
int raddr, int rinst, int rcnt, int rwidth)
{
return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}
static inline void lan_wr(u32 val, struct lan966x *lan966x,
int id, int tinst, int tcnt,
int gbase, int ginst, int gcnt, int gwidth,
int raddr, int rinst, int rcnt, int rwidth)
{
writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
gbase, ginst, gcnt, gwidth,
raddr, rinst, rcnt, rwidth));
}
static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
int id, int tinst, int tcnt,
int gbase, int ginst, int gcnt, int gwidth,
int raddr, int rinst, int rcnt, int rwidth)
{
u32 nval;
nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
gcnt, gwidth, raddr, rinst, rcnt, rwidth));
nval = (nval & ~mask) | (val & mask);
writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}
#endif /* __LAN966X_MAIN_H__ */
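
The accessor helpers above take the register description as a flat list of positional parameters (target id, instance counts, group base/width, register offset/width); the generated macros in lan966x_regs.h further below expand, via __REG, to exactly that parameter list. As a minimal sketch of how the pieces fit together, the ANA_ANAINTR update in lan966x_init() above,

	lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
		ANA_ANAINTR_INTR_ENA,
		lan966x, ANA_ANAINTR);

expands after macro substitution into roughly

	lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1), ANA_ANAINTR_INTR_ENA, lan966x,
		TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4);

so lan_addr() resolves the access to regs[TARGET_ANA] + 29824 + 16, and the read-modify-write touches only the INTR_ENA bit.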


@@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <linux/phylink.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/phy/phy.h>
#include <linux/sfp.h>
#include "lan966x_main.h"
static void lan966x_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
}
static int lan966x_phylink_mac_prepare(struct phylink_config *config,
unsigned int mode,
phy_interface_t iface)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
int err;
if (port->serdes) {
err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
iface);
if (err) {
netdev_err(to_net_dev(config->dev),
"Could not set mode of SerDes\n");
return err;
}
}
return 0;
}
static void lan966x_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode,
phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
struct lan966x_port_config *port_config = &port->config;
port_config->duplex = duplex;
port_config->speed = speed;
port_config->pause = 0;
port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
lan966x_port_config_up(port);
}
static void lan966x_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
struct lan966x *lan966x = port->lan966x;
lan966x_port_config_down(port);
/* Take PCS out of reset */
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
DEV_CLOCK_CFG_PCS_RX_RST |
DEV_CLOCK_CFG_PCS_TX_RST,
lan966x, DEV_CLOCK_CFG(port->chip_port));
}
static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
{
return container_of(pcs, struct lan966x_port, phylink_pcs);
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
lan966x_port_status_get(port, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs,
unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
struct lan966x_port_config config;
int ret;
config = port->config;
config.portmode = interface;
config.inband = phylink_autoneg_inband(mode);
config.autoneg = phylink_test(advertising, Autoneg);
config.advertising = advertising;
ret = lan966x_port_pcs_set(port, &config);
if (ret)
netdev_err(port->dev, "port PCS config failed: %d\n", ret);
return ret;
}
static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
{
/* Currently not used */
}
const struct phylink_mac_ops lan966x_phylink_mac_ops = {
.validate = phylink_generic_validate,
.mac_config = lan966x_phylink_mac_config,
.mac_prepare = lan966x_phylink_mac_prepare,
.mac_link_down = lan966x_phylink_mac_link_down,
.mac_link_up = lan966x_phylink_mac_link_up,
};
const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
.pcs_get_state = lan966x_pcs_get_state,
.pcs_config = lan966x_pcs_config,
.pcs_an_restart = lan966x_pcs_aneg_restart,
};
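
These ops tables are consumed by the per-port netdev setup code, which is not part of this excerpt. A hedged sketch of the expected wiring, using only fields from struct lan966x_port above (the mac_capabilities / supported_interfaces masks in phylink_config are omitted):

	struct phylink *phylink;

	port->phylink_config.dev = &port->dev->dev;
	port->phylink_config.type = PHYLINK_NETDEV;
	port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;

	phylink = phylink_create(&port->phylink_config, port->fwnode,
				 port->config.portmode,
				 &lan966x_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	/* Attach the PCS explicitly; assumes the phylink_set_pcs() API of
	 * this kernel generation (newer kernels use mac_select_pcs()).
	 */
	phylink_set_pcs(phylink, &port->phylink_pcs);
	port->phylink = phylink;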


@@ -0,0 +1,412 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/netdevice.h>
#include <linux/phy/phy.h>
#include "lan966x_main.h"
/* Watermark encode */
#define MULTIPLIER_BIT BIT(8)
static u32 lan966x_wm_enc(u32 value)
{
value /= LAN966X_BUFFER_CELL_SZ;
if (value >= MULTIPLIER_BIT) {
value /= 16;
if (value >= MULTIPLIER_BIT)
value = (MULTIPLIER_BIT - 1);
value |= MULTIPLIER_BIT;
}
return value;
}
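/* Worked example (illustration only): the pause watermarks programmed in
 * lan966x_port_link_up() below are byte counts that this helper first
 * converts to 64-byte cells:
 *
 *   4 * 1518 = 6072 bytes -> 6072 / 64 = 94 cells  (< 256, stored as-is)
 *   6 * 1518 = 9108 bytes -> 9108 / 64 = 142 cells (< 256, stored as-is)
 *
 * Values of 256 cells or more are divided by 16 and tagged with
 * MULTIPLIER_BIT, e.g. LAN966X_BUFFER_MEMORY (160 KB) -> 2560 cells
 * -> (2560 / 16) | BIT(8) = 416, which is how SYS_ATOP_TOT_CFG ends up
 * being written.
 */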
static void lan966x_port_link_down(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
u32 val, delay = 0;
/* 0.5: Disable any AFI */
lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
AFI_PORT_CFG_FC_SKIP_TTI_INJ |
AFI_PORT_CFG_FRM_OUT_MAX,
lan966x, AFI_PORT_CFG(port->chip_port));
/* Wait for the AFI_PORT_FRM_OUT counter to reach 0 for this port */
while (true) {
val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
break;
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
delay++;
if (delay == 2000) {
pr_err("AFI timeout chip port %u", port->chip_port);
break;
}
}
delay = 0;
/* 1: Reset the PCS Rx clock domain */
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
DEV_CLOCK_CFG_PCS_RX_RST,
lan966x, DEV_CLOCK_CFG(port->chip_port));
/* 2: Disable MAC frame reception */
lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
DEV_MAC_ENA_CFG_RX_ENA,
lan966x, DEV_MAC_ENA_CFG(port->chip_port));
/* 3: Disable traffic being sent to or from switch port */
lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
QSYS_SW_PORT_MODE_PORT_ENA,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
/* 4: Disable dequeuing from the egress queues */
lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
QSYS_PORT_MODE_DEQUEUE_DIS,
lan966x, QSYS_PORT_MODE(port->chip_port));
/* 5: Disable Flowcontrol */
lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
SYS_PAUSE_CFG_PAUSE_ENA,
lan966x, SYS_PAUSE_CFG(port->chip_port));
/* 5.1: Disable PFC */
lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
QSYS_SW_PORT_MODE_TX_PFC_ENA,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
/* 6: Wait a worst-case time of 8 ms (jumbo frame at 10 Mbit) */
usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
/* 7: Disable HDX backpressure */
lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
SYS_FRONT_PORT_MODE_HDX_MODE,
lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
/* 8: Flush the queues associated with the port */
lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
QSYS_SW_PORT_MODE_AGING_MODE,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
/* 9: Enable dequeuing from the egress queues */
lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
QSYS_PORT_MODE_DEQUEUE_DIS,
lan966x, QSYS_PORT_MODE(port->chip_port));
/* 10: Wait until flushing is complete */
while (true) {
val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
break;
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
delay++;
if (delay == 2000) {
pr_err("Flush timeout chip port %u", port->chip_port);
break;
}
}
/* 11: Reset the Port and MAC clock domains */
lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
DEV_MAC_ENA_CFG_TX_ENA,
lan966x, DEV_MAC_ENA_CFG(port->chip_port));
lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
DEV_CLOCK_CFG_PORT_RST,
lan966x, DEV_CLOCK_CFG(port->chip_port));
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
DEV_CLOCK_CFG_PORT_RST_SET(1),
DEV_CLOCK_CFG_MAC_TX_RST |
DEV_CLOCK_CFG_MAC_RX_RST |
DEV_CLOCK_CFG_PORT_RST,
lan966x, DEV_CLOCK_CFG(port->chip_port));
/* 12: Clear flushing */
lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
QSYS_SW_PORT_MODE_AGING_MODE,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
/* The port is disabled and flushed, now set up the port in the
* new operating mode
*/
}
static void lan966x_port_link_up(struct lan966x_port *port)
{
struct lan966x_port_config *config = &port->config;
struct lan966x *lan966x = port->lan966x;
int speed = 0, mode = 0;
int atop_wm = 0;
switch (config->speed) {
case SPEED_10:
speed = LAN966X_SPEED_10;
break;
case SPEED_100:
speed = LAN966X_SPEED_100;
break;
case SPEED_1000:
speed = LAN966X_SPEED_1000;
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
break;
case SPEED_2500:
speed = LAN966X_SPEED_2500;
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
break;
}
/* GIGA_MODE_ENA also needs to be set for QSGMII ports, regardless of
 * the port speed.
 */
if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
lan966x, DEV_MAC_MODE_CFG(port->chip_port));
lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
DEV_MAC_IFG_CFG_TX_IFG |
DEV_MAC_IFG_CFG_RX_IFG1 |
DEV_MAC_IFG_CFG_RX_IFG2,
lan966x, DEV_MAC_IFG_CFG(port->chip_port));
lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
DEV_MAC_HDX_CFG_SEED |
DEV_MAC_HDX_CFG_SEED_LOAD,
lan966x, DEV_MAC_HDX_CFG(port->chip_port));
if (config->portmode == PHY_INTERFACE_MODE_GMII) {
if (config->speed == SPEED_1000)
lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
lan966x,
CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
else
lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
lan966x,
CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
}
/* No PFC */
lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
lan966x, ANA_PFC_CFG(port->chip_port));
lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
DEV_PCS1G_CFG_PCS_ENA,
lan966x, DEV_PCS1G_CFG(port->chip_port));
lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
DEV_PCS1G_SD_CFG_SD_ENA,
lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
/* Set the pause watermark hysteresis; start/stop are given in units of 1518 bytes */
lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
lan966x, SYS_PAUSE_CFG(port->chip_port));
/* Set SMAC of Pause frame (00:00:00:00:00:00) */
lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
/* Flow control */
lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
SYS_MAC_FC_CFG_FC_LINK_SPEED |
SYS_MAC_FC_CFG_FC_LATENCY_CFG |
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
SYS_MAC_FC_CFG_RX_FC_ENA |
SYS_MAC_FC_CFG_TX_FC_ENA,
lan966x, SYS_MAC_FC_CFG(port->chip_port));
/* Tail dropping watermark */
atop_wm = lan966x->shared_queue_sz;
/* The total memory size is divided by the number of front ports plus
 * the CPU port
 */
lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x,
SYS_ATOP(port->chip_port));
lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
/* This needs to be at the end */
/* Enable MAC module */
lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
DEV_MAC_ENA_CFG_TX_ENA_SET(1),
lan966x, DEV_MAC_ENA_CFG(port->chip_port));
/* Take the clock out of reset */
lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
lan966x, DEV_CLOCK_CFG(port->chip_port));
/* Core: Enable port for frame transfer */
lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
AFI_PORT_CFG_FC_SKIP_TTI_INJ |
AFI_PORT_CFG_FRM_OUT_MAX,
lan966x, AFI_PORT_CFG(port->chip_port));
}
void lan966x_port_config_down(struct lan966x_port *port)
{
lan966x_port_link_down(port);
}
void lan966x_port_config_up(struct lan966x_port *port)
{
lan966x_port_link_up(port);
}
void lan966x_port_status_get(struct lan966x_port *port,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
bool link_down;
u16 bmsr = 0;
u16 lp_adv;
u32 val;
val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
if (link_down)
lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
/* Get both current Link and Sync status */
val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
state->link &= !link_down;
/* Get PCS ANEG status register */
val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
/* Aneg complete provides more information */
if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
state->an_complete = true;
bmsr |= state->link ? BMSR_LSTATUS : 0;
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
} else {
if (!state->link)
return;
if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
state->speed = SPEED_1000;
else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
state->speed = SPEED_2500;
state->duplex = DUPLEX_FULL;
}
}
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config)
{
struct lan966x *lan966x = port->lan966x;
bool inband_aneg = false;
bool outband;
int err;
if (config->inband) {
if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
config->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
config->autoneg)
inband_aneg = true; /* Clause-37 in-band-aneg */
if (config->speed > 0) {
err = phy_set_speed(port->serdes, config->speed);
if (err)
return err;
}
outband = false;
} else {
outband = true;
}
/* Disable or enable inband */
lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
/* Enable PCS */
lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
lan966x, DEV_PCS1G_CFG(port->chip_port));
if (inband_aneg) {
int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
config->advertising);
if (adv >= 0)
/* Enable in-band aneg */
lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
} else {
lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
}
/* Take PCS out of reset */
lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
DEV_CLOCK_CFG_LINK_SPEED |
DEV_CLOCK_CFG_PCS_RX_RST |
DEV_CLOCK_CFG_PCS_TX_RST,
lan966x, DEV_CLOCK_CFG(port->chip_port));
port->config = *config;
return 0;
}
void lan966x_port_init(struct lan966x_port *port)
{
struct lan966x_port_config *config = &port->config;
struct lan966x *lan966x = port->lan966x;
lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
ANA_PORT_CFG_LEARN_ENA,
lan966x, ANA_PORT_CFG(port->chip_port));
lan966x_port_config_down(port);
if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
return;
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
DEV_CLOCK_CFG_PCS_RX_RST |
DEV_CLOCK_CFG_PCS_TX_RST |
DEV_CLOCK_CFG_LINK_SPEED,
lan966x, DEV_CLOCK_CFG(port->chip_port));
}


@@ -0,0 +1,730 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
* Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
*/
#ifndef _LAN966X_REGS_H_
#define _LAN966X_REGS_H_
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/bug.h>
enum lan966x_target {
TARGET_AFI = 2,
TARGET_ANA = 3,
TARGET_CHIP_TOP = 5,
TARGET_CPU = 6,
TARGET_DEV = 13,
TARGET_GCB = 27,
TARGET_ORG = 36,
TARGET_QS = 42,
TARGET_QSYS = 46,
TARGET_REW = 47,
TARGET_SYS = 52,
NUM_TARGETS = 66
};
#define __REG(...) __VA_ARGS__
/* AFI:PORT_TBL:PORT_FRM_OUT */
#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
/* AFI:PORT_TBL:PORT_CFG */
#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
/* ANA:ANA:ADVLEARN */
#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
#define ANA_ADVLEARN_VLAN_CHK BIT(0)
#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
/* ANA:ANA:ANAINTR */
#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
#define ANA_ANAINTR_INTR BIT(1)
#define ANA_ANAINTR_INTR_SET(x)\
FIELD_PREP(ANA_ANAINTR_INTR, x)
#define ANA_ANAINTR_INTR_GET(x)\
FIELD_GET(ANA_ANAINTR_INTR, x)
#define ANA_ANAINTR_INTR_ENA BIT(0)
#define ANA_ANAINTR_INTR_ENA_SET(x)\
FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
#define ANA_ANAINTR_INTR_ENA_GET(x)\
FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
/* ANA:ANA:AUTOAGE */
#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
/* ANA:ANA:FLOODING */
#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
/* ANA:ANA:FLOODING_IPMC */
#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
/* ANA:PGID:PGID */
#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
#define ANA_PGID_PGID GENMASK(8, 0)
#define ANA_PGID_PGID_SET(x)\
FIELD_PREP(ANA_PGID_PGID, x)
#define ANA_PGID_PGID_GET(x)\
FIELD_GET(ANA_PGID_PGID, x)
/* ANA:PGID:PGID_CFG */
#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
/* ANA:ANA_TABLES:MACHDATA */
#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
/* ANA:ANA_TABLES:MACLDATA */
#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
/* ANA:ANA_TABLES:MACACCESS */
#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
#define ANA_MACACCESS_CHANGE2SW BIT(17)
#define ANA_MACACCESS_CHANGE2SW_SET(x)\
FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
#define ANA_MACACCESS_CHANGE2SW_GET(x)\
FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
#define ANA_MACACCESS_VALID BIT(12)
#define ANA_MACACCESS_VALID_SET(x)\
FIELD_PREP(ANA_MACACCESS_VALID, x)
#define ANA_MACACCESS_VALID_GET(x)\
FIELD_GET(ANA_MACACCESS_VALID, x)
#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
#define ANA_MACACCESS_DEST_IDX_SET(x)\
FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
#define ANA_MACACCESS_DEST_IDX_GET(x)\
FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
/* ANA:PORT:CPU_FWD_BPDU_CFG */
#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
/* ANA:PORT:PORT_CFG */
#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
#define ANA_PORT_CFG_LEARNAUTO BIT(6)
#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
#define ANA_PORT_CFG_LEARN_ENA BIT(5)
#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
#define ANA_PORT_CFG_RECV_ENA BIT(4)
#define ANA_PORT_CFG_RECV_ENA_SET(x)\
FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
#define ANA_PORT_CFG_RECV_ENA_GET(x)\
FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
/* ANA:PFC:PFC_CFG */
#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
/* DEV:PORT_MODE:CLOCK_CFG */
#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
#define DEV_CLOCK_CFG_PORT_RST BIT(3)
#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
#define DEV_MAC_HDX_CFG_SEED_SET(x)\
FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
#define DEV_MAC_HDX_CFG_SEED_GET(x)\
FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
#define QS_XTR_GRP_CFG_MODE_SET(x)\
FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
#define QS_XTR_GRP_CFG_MODE_GET(x)\
FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:XTR:XTR_RD */
#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
/* DEVCPU_QS:XTR:XTR_FLUSH */
#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
/* DEVCPU_QS:INJ:INJ_GRP_CFG */
#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
#define QS_INJ_GRP_CFG_MODE_SET(x)\
FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
#define QS_INJ_GRP_CFG_MODE_GET(x)\
FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:INJ:INJ_WR */
#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
/* DEVCPU_QS:INJ:INJ_CTRL */
#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
#define QS_INJ_CTRL_EOF BIT(19)
#define QS_INJ_CTRL_EOF_SET(x)\
FIELD_PREP(QS_INJ_CTRL_EOF, x)
#define QS_INJ_CTRL_EOF_GET(x)\
FIELD_GET(QS_INJ_CTRL_EOF, x)
#define QS_INJ_CTRL_SOF BIT(18)
#define QS_INJ_CTRL_SOF_SET(x)\
FIELD_PREP(QS_INJ_CTRL_SOF, x)
#define QS_INJ_CTRL_SOF_GET(x)\
FIELD_GET(QS_INJ_CTRL_SOF, x)
#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
/* DEVCPU_QS:INJ:INJ_STATUS */
#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
/* QSYS:SYSTEM:PORT_MODE */
#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
/* QSYS:SYSTEM:SWITCH_PORT_MODE */
#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
/* QSYS:SYSTEM:SW_STATUS */
#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
/* QSYS:SYSTEM:CPU_GROUP_MAP */
#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
/* REW:PORT:PORT_CFG */
#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
#define REW_PORT_CFG_NO_REWRITE BIT(0)
#define REW_PORT_CFG_NO_REWRITE_SET(x)\
FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
#define REW_PORT_CFG_NO_REWRITE_GET(x)\
FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
/* SYS:SYSTEM:RESET_CFG */
#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
#define SYS_RESET_CFG_CORE_ENA BIT(0)
#define SYS_RESET_CFG_CORE_ENA_SET(x)\
FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
#define SYS_RESET_CFG_CORE_ENA_GET(x)\
FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
/* SYS:SYSTEM:PORT_MODE */
#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
/* SYS:SYSTEM:FRONT_PORT_MODE */
#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
/* SYS:SYSTEM:FRM_AGING */
#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
/* SYS:SYSTEM:STAT_CFG */
#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
/* SYS:PAUSE_CFG:PAUSE_CFG */
#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
/* SYS:PAUSE_CFG:ATOP */
#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
/* SYS:PAUSE_CFG:MAC_FC_CFG */
#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
/* SYS:STAT:CNT */
#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
/* SYS:RAM_CTRL:RAM_INIT */
#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
#define SYS_RAM_INIT_RAM_INIT BIT(1)
#define SYS_RAM_INIT_RAM_INIT_SET(x)\
FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
#define SYS_RAM_INIT_RAM_INIT_GET(x)\
FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
#endif /* _LAN966X_REGS_H_ */
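
All of the *_SET/*_GET helpers above are thin wrappers around FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, so a field can be read back in a single expression, e.g. (equivalent to the AFI drain loop in lan966x_port.c):

	u32 cnt = AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(
			lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port)));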