// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

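/* Usage sketch (editor illustration, not driver code): each instance
 * above expands into a readl/writel pair scoped to one register block,
 * so e.g. BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET) provides:
 *
 *	u32 reg = rxchk_readl(priv, RXCHK_CONTROL);
 *	rxchk_writel(priv, reg | RXCHK_EN, RXCHK_CONTROL);
 *
 * letting callers name registers by their offset within the block only.
 */
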
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

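/* Illustration of the Lite remapping above: tdma_control_bit(priv, TSB_EN)
 * returns BIT(TSB_EN) on full SYSTEMPORT; on Lite it returns
 * BIT(TSB_EN + 1) when TSB_EN >= ACB_ALGO and BIT(TSB_EN) otherwise,
 * since every TDMA_CONTROL bit from ACB_ALGO upwards sits one position
 * higher there. Callers such as bcm_sysport_set_tx_csum() below use this
 * helper instead of open-coding BIT().
 */
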
/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

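/* Worked example with a hypothetical address: for addr = 0x1_2345_6780,
 * upper_32_bits() yields 0x1 (then masked by DESC_ADDR_HI_MASK) and goes
 * into the HI/STATUS_LEN word, while 0x23456780 lands in DESC_ADDR_LO;
 * builds without CONFIG_PHYS_ADDR_T_64BIT skip the HI write entirely.
 */
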
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. when using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_CTAG_TX));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	/* Indicating that software inserts Broadcom tags is needed for the TX
	 * checksum to be computed correctly when using VLAN HW acceleration;
	 * otherwise it has no effect, so it can always be turned on.
	 */
	if (netdev_uses_dsa(dev))
		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
	else
		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

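/* Reader's note: the STAT_* entry macros above come from bcmsysport.h;
 * STAT_NETDEV/STAT_NETDEV64 entries are copied from software counters in
 * bcm_sysport_get_stats() below, while the MIB/RUNT entries are fetched
 * from hardware by bcm_sysport_update_mib_counters().
 */
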
#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

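/* Note on the retry loop above: u64_stats_fetch_begin_irq() /
 * u64_stats_fetch_retry_irq() re-read the ring counters until the
 * sequence count is stable, which is what makes the 64-bit bytes/packets
 * snapshot consistent on 32-bit hosts where such loads are not atomic.
 */
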
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

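/* Arithmetic check for the conversion above: one timeout tick is
 * 1024 / 125 MHz ~= 8.192 us, so e.g. usecs = 50 programs
 * DIV_ROUND_UP(50000, 8192) = 7 ticks, i.e. roughly 57.3 us of actual
 * coalescing delay; see the clock comment in bcm_sysport_set_coalesce().
 */
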
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

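/* Note on the index math above: both c_index and ring->c_index are
 * masked with RING_CONS_INDEX_MASK, so the (c_index - ring->c_index)
 * subtraction stays correct across hardware counter wraparound, the
 * same pattern bcm_sysport_desc_rx() uses with RDMA_CONS_INDEX_MASK.
 */
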
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

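/* Reader's note on the Lite write above: with producer and consumer
 * indexes packed into one 32-bit RDMA_CONS_INDEX register, the consumer
 * half lives in the upper 16 bits, hence the << 16 shift; the producer
 * half is hardware-owned, matching the RDMA_PROD_INDEX_MASK read in
 * bcm_sysport_desc_rx().
 */
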
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

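/* Flow note: bcm_sysport_poll() feeds RX samples to net_dim(), which
 * picks a new moderation profile and schedules this worker; the worker
 * then applies the chosen profile by reprogramming RDMA_MBDONE_INTR
 * through bcm_sysport_set_rx_coalesce() and re-arms DIM measurement.
 */
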
Florian Fainelli80105be2014-04-24 18:08:57 -07001126/* RX and misc interrupt routine */
1127static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1128{
1129 struct net_device *dev = dev_id;
1130 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainelli44a45242017-01-20 11:08:27 -08001131 struct bcm_sysport_tx_ring *txr;
1132 unsigned int ring, ring_bit;
Florian Fainelli80105be2014-04-24 18:08:57 -07001133
1134 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1135 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1136 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1137
1138 if (unlikely(priv->irq0_stat == 0)) {
1139 netdev_warn(priv->netdev, "spurious RX interrupt\n");
1140 return IRQ_NONE;
1141 }
1142
1143 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
Florian Fainellib6e0e872018-03-22 18:19:32 -07001144 priv->dim.event_ctr++;
Florian Fainelli80105be2014-04-24 18:08:57 -07001145 if (likely(napi_schedule_prep(&priv->napi))) {
1146 /* disable RX interrupts */
1147 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
Florian Fainelliba909502016-04-20 11:37:08 -07001148 __napi_schedule_irqoff(&priv->napi);
Florian Fainelli80105be2014-04-24 18:08:57 -07001149 }
1150 }
1151
1152 /* TX ring is full, perform a full reclaim since we do not know
1153 * which one would trigger this interrupt
1154 */
1155 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1156 bcm_sysport_tx_reclaim_all(priv);
1157
Florian Fainelli44a45242017-01-20 11:08:27 -08001158 if (!priv->is_lite)
1159 goto out;
1160
1161 for (ring = 0; ring < dev->num_tx_queues; ring++) {
1162 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1163 if (!(priv->irq0_stat & ring_bit))
1164 continue;
1165
1166 txr = &priv->tx_rings[ring];
1167
1168 if (likely(napi_schedule_prep(&txr->napi))) {
1169 intrl2_0_mask_set(priv, ring_bit);
1170 __napi_schedule(&txr->napi);
1171 }
1172 }
1173out:
Florian Fainelli80105be2014-04-24 18:08:57 -07001174 return IRQ_HANDLED;
1175}
1176
1177/* TX interrupt service routine */
1178static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1179{
1180 struct net_device *dev = dev_id;
1181 struct bcm_sysport_priv *priv = netdev_priv(dev);
1182 struct bcm_sysport_tx_ring *txr;
1183 unsigned int ring;
1184
1185 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1186 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1187 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1188
1189 if (unlikely(priv->irq1_stat == 0)) {
1190 netdev_warn(priv->netdev, "spurious TX interrupt\n");
1191 return IRQ_NONE;
1192 }
1193
1194 for (ring = 0; ring < dev->num_tx_queues; ring++) {
1195 if (!(priv->irq1_stat & BIT(ring)))
1196 continue;
1197
1198 txr = &priv->tx_rings[ring];
1199
1200 if (likely(napi_schedule_prep(&txr->napi))) {
1201 intrl2_1_mask_set(priv, BIT(ring));
Florian Fainelliba909502016-04-20 11:37:08 -07001202 __napi_schedule_irqoff(&txr->napi);
Florian Fainelli80105be2014-04-24 18:08:57 -07001203 }
1204 }
1205
1206 return IRQ_HANDLED;
1207}
1208
Florian Fainelli83e82f42014-07-01 21:08:40 -07001209static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1210{
1211 struct bcm_sysport_priv *priv = dev_id;
1212
1213 pm_wakeup_event(&priv->pdev->dev, 0);
1214
1215 return IRQ_HANDLED;
1216}
1217
Florian Fainelli6cec4f52015-07-31 11:42:55 -07001218#ifdef CONFIG_NET_POLL_CONTROLLER
1219static void bcm_sysport_poll_controller(struct net_device *dev)
1220{
1221 struct bcm_sysport_priv *priv = netdev_priv(dev);
1222
1223 disable_irq(priv->irq0);
1224 bcm_sysport_rx_isr(priv->irq0, priv);
1225 enable_irq(priv->irq0);
1226
Florian Fainelli44a45242017-01-20 11:08:27 -08001227 if (!priv->is_lite) {
1228 disable_irq(priv->irq1);
1229 bcm_sysport_tx_isr(priv->irq1, priv);
1230 enable_irq(priv->irq1);
1231 }
Florian Fainelli6cec4f52015-07-31 11:42:55 -07001232}
1233#endif
1234
Florian Fainellie87474a2014-10-02 09:43:16 -07001235static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1236 struct net_device *dev)
Florian Fainelli80105be2014-04-24 18:08:57 -07001237{
Florian Fainellia5d78ce2018-09-27 15:36:14 -07001238 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001239 struct sk_buff *nskb;
Paul Gortmaker3afc5572014-05-30 15:39:30 -04001240 struct bcm_tsb *tsb;
Florian Fainelli80105be2014-04-24 18:08:57 -07001241 u32 csum_info;
1242 u8 ip_proto;
1243 u16 csum_start;
Florian Fainellic0eb0552018-04-02 15:58:56 -07001244 __be16 ip_ver;
Florian Fainelli80105be2014-04-24 18:08:57 -07001245
1246 /* Re-allocate SKB if needed */
1247 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1248 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
Florian Fainelli80105be2014-04-24 18:08:57 -07001249 if (!nskb) {
Florian Fainelliaa6ca0e2018-09-27 15:36:13 -07001250 dev_kfree_skb_any(skb);
Florian Fainellia5d78ce2018-09-27 15:36:14 -07001251 priv->mib.tx_realloc_tsb_failed++;
Florian Fainelli80105be2014-04-24 18:08:57 -07001252 dev->stats.tx_errors++;
1253 dev->stats.tx_dropped++;
Florian Fainellie87474a2014-10-02 09:43:16 -07001254 return NULL;
Florian Fainelli80105be2014-04-24 18:08:57 -07001255 }
Florian Fainelliaa6ca0e2018-09-27 15:36:13 -07001256 dev_consume_skb_any(skb);
Florian Fainelli80105be2014-04-24 18:08:57 -07001257 skb = nskb;
Florian Fainellia5d78ce2018-09-27 15:36:14 -07001258 priv->mib.tx_realloc_tsb++;
Florian Fainelli80105be2014-04-24 18:08:57 -07001259 }
1260
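	/* The 8-byte transmit status block (TSB) is prepended to the frame
	 * and consumed by the hardware to drive checksum offload and VLAN
	 * insertion; it is stripped before the frame goes out on the wire.
	 */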
Johannes Bergd58ff352017-06-16 14:29:23 +02001261 tsb = skb_push(skb, sizeof(*tsb));
Florian Fainelli80105be2014-04-24 18:08:57 -07001262 /* Zero-out TSB by default */
1263 memset(tsb, 0, sizeof(*tsb));
1264
Florian Fainelli6e9fdb62020-07-06 14:29:39 -07001265 if (skb_vlan_tag_present(skb)) {
Colin Ian Kinge3cbdaf2020-07-08 19:37:23 +01001266 tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
Florian Fainelli6e9fdb62020-07-06 14:29:39 -07001267 tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
1268 }
1269
Florian Fainelli80105be2014-04-24 18:08:57 -07001270 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Florian Fainellic0eb0552018-04-02 15:58:56 -07001271 ip_ver = skb->protocol;
Florian Fainelli80105be2014-04-24 18:08:57 -07001272 switch (ip_ver) {
Florian Fainellic0eb0552018-04-02 15:58:56 -07001273 case htons(ETH_P_IP):
Florian Fainelli80105be2014-04-24 18:08:57 -07001274 ip_proto = ip_hdr(skb)->protocol;
1275 break;
Florian Fainellic0eb0552018-04-02 15:58:56 -07001276 case htons(ETH_P_IPV6):
Florian Fainelli80105be2014-04-24 18:08:57 -07001277 ip_proto = ipv6_hdr(skb)->nexthdr;
1278 break;
1279 default:
Florian Fainellie87474a2014-10-02 09:43:16 -07001280 return skb;
Florian Fainelli80105be2014-04-24 18:08:57 -07001281 }
1282
1283 /* Get the checksum offset and the L4 (transport) offset */
1284 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
Florian Fainelli6e9fdb62020-07-06 14:29:39 -07001285 /* Account for the HW inserted VLAN tag */
1286 if (skb_vlan_tag_present(skb))
1287 csum_start += VLAN_HLEN;
Florian Fainelli80105be2014-04-24 18:08:57 -07001288 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1289 csum_info |= (csum_start << L4_PTR_SHIFT);
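		/* Both offsets are relative to the first byte after the TSB:
		 * the low bits hold the position where the computed checksum
		 * must be stored, the bits above L4_PTR_SHIFT the position
		 * where the L4 header (checksum start) begins.
		 */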
1290
1291 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1292 csum_info |= L4_LENGTH_VALID;
Florian Fainellic0eb0552018-04-02 15:58:56 -07001293 if (ip_proto == IPPROTO_UDP &&
1294 ip_ver == htons(ETH_P_IP))
Florian Fainelli80105be2014-04-24 18:08:57 -07001295 csum_info |= L4_UDP;
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001296 } else {
Florian Fainelli80105be2014-04-24 18:08:57 -07001297 csum_info = 0;
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001298 }
Florian Fainelli80105be2014-04-24 18:08:57 -07001299
1300 tsb->l4_ptr_dest_map = csum_info;
1301 }
1302
Florian Fainellie87474a2014-10-02 09:43:16 -07001303 return skb;
Florian Fainelli80105be2014-04-24 18:08:57 -07001304}
1305
1306static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1307 struct net_device *dev)
1308{
1309 struct bcm_sysport_priv *priv = netdev_priv(dev);
1310 struct device *kdev = &priv->pdev->dev;
1311 struct bcm_sysport_tx_ring *ring;
Florian Fainelli8b8e6e72021-12-15 12:24:49 -08001312 unsigned long flags, desc_flags;
Florian Fainelli80105be2014-04-24 18:08:57 -07001313 struct bcm_sysport_cb *cb;
1314 struct netdev_queue *txq;
Florian Fainelli7e6e1852019-04-22 09:46:44 -07001315 u32 len_status, addr_lo;
Florian Fainellidab531b2014-05-14 19:32:14 -07001316 unsigned int skb_len;
Florian Fainelli80105be2014-04-24 18:08:57 -07001317 dma_addr_t mapping;
Florian Fainelli80105be2014-04-24 18:08:57 -07001318 u16 queue;
1319 int ret;
1320
1321 queue = skb_get_queue_mapping(skb);
1322 txq = netdev_get_tx_queue(dev, queue);
1323 ring = &priv->tx_rings[queue];
1324
Florian Fainellid8498082014-06-05 10:22:15 -07001325 /* lock against tx reclaim in BH context and TX ring full interrupt */
1326 spin_lock_irqsave(&ring->lock, flags);
Florian Fainelli80105be2014-04-24 18:08:57 -07001327 if (unlikely(ring->desc_count == 0)) {
1328 netif_tx_stop_queue(txq);
1329 netdev_err(dev, "queue %d awake and ring full!\n", queue);
1330 ret = NETDEV_TX_BUSY;
1331 goto out;
1332 }
1333
Florian Fainelli38e5a852017-01-03 16:34:49 -08001334	/* Insert TSB and checksum info */
1335 if (priv->tsb_en) {
1336 skb = bcm_sysport_insert_tsb(skb, dev);
1337 if (!skb) {
1338 ret = NETDEV_TX_OK;
1339 goto out;
1340 }
1341 }
1342
Florian Fainellibb7da332017-01-03 16:34:48 -08001343 skb_len = skb->len;
Florian Fainellidab531b2014-05-14 19:32:14 -07001344
1345 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
Florian Fainelli80105be2014-04-24 18:08:57 -07001346 if (dma_mapping_error(kdev, mapping)) {
Florian Fainelli60b4ea12014-11-19 10:29:55 -08001347 priv->mib.tx_dma_failed++;
Florian Fainelli80105be2014-04-24 18:08:57 -07001348 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001349 skb->data, skb_len);
Florian Fainelli80105be2014-04-24 18:08:57 -07001350 ret = NETDEV_TX_OK;
1351 goto out;
1352 }
1353
1354 /* Remember the SKB for future freeing */
1355 cb = &ring->cbs[ring->curr_desc];
1356 cb->skb = skb;
1357 dma_unmap_addr_set(cb, dma_addr, mapping);
Florian Fainellidab531b2014-05-14 19:32:14 -07001358 dma_unmap_len_set(cb, dma_len, skb_len);
Florian Fainelli80105be2014-04-24 18:08:57 -07001359
Florian Fainelli7e6e1852019-04-22 09:46:44 -07001360 addr_lo = lower_32_bits(mapping);
Florian Fainelli80105be2014-04-24 18:08:57 -07001361 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
Florian Fainellidab531b2014-05-14 19:32:14 -07001362 len_status |= (skb_len << DESC_LEN_SHIFT);
Florian Fainelli80105be2014-04-24 18:08:57 -07001363 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001364 DESC_STATUS_SHIFT;
Florian Fainelli80105be2014-04-24 18:08:57 -07001365 if (skb->ip_summed == CHECKSUM_PARTIAL)
1366 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
Florian Fainelli6e9fdb62020-07-06 14:29:39 -07001367 if (skb_vlan_tag_present(skb))
1368 len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
Florian Fainelli80105be2014-04-24 18:08:57 -07001369
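	/* Software producer accounting: curr_desc wraps at ring->size and
	 * desc_count tracks how many descriptors remain available.
	 */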
1370 ring->curr_desc++;
1371 if (ring->curr_desc == ring->size)
1372 ring->curr_desc = 0;
1373 ring->desc_count--;
1374
Florian Fainelli7e6e1852019-04-22 09:46:44 -07001375 /* Ports are latched, so write upper address first */
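	/* desc_lock is device-global: the descriptor resource behind these
	 * per-ring write ports is shared by all TX rings, so the two writes
	 * below must not interleave with those of another ring.
	 */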
Florian Fainelli8b8e6e72021-12-15 12:24:49 -08001376 spin_lock_irqsave(&priv->desc_lock, desc_flags);
Florian Fainelli7e6e1852019-04-22 09:46:44 -07001377 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1378 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
Florian Fainelli8b8e6e72021-12-15 12:24:49 -08001379 spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
Florian Fainelli80105be2014-04-24 18:08:57 -07001380
1381 /* Check ring space and update SW control flow */
1382 if (ring->desc_count == 0)
1383 netif_tx_stop_queue(txq);
1384
1385 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001386 ring->index, ring->desc_count, ring->curr_desc);
Florian Fainelli80105be2014-04-24 18:08:57 -07001387
1388 ret = NETDEV_TX_OK;
1389out:
Florian Fainellid8498082014-06-05 10:22:15 -07001390 spin_unlock_irqrestore(&ring->lock, flags);
Florian Fainelli80105be2014-04-24 18:08:57 -07001391 return ret;
1392}
1393
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05001394static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
Florian Fainelli80105be2014-04-24 18:08:57 -07001395{
1396 netdev_warn(dev, "transmit timeout!\n");
1397
Florian Westphal860e9532016-05-03 16:33:13 +02001398 netif_trans_update(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001399 dev->stats.tx_errors++;
1400
1401 netif_tx_wake_all_queues(dev);
1402}
1403
1404/* phylib adjust link callback */
1405static void bcm_sysport_adj_link(struct net_device *dev)
1406{
1407 struct bcm_sysport_priv *priv = netdev_priv(dev);
Philippe Reynes715a0222016-06-19 20:39:08 +02001408 struct phy_device *phydev = dev->phydev;
Florian Fainelli80105be2014-04-24 18:08:57 -07001409 unsigned int changed = 0;
1410 u32 cmd_bits = 0, reg;
1411
1412 if (priv->old_link != phydev->link) {
1413 changed = 1;
1414 priv->old_link = phydev->link;
1415 }
1416
1417 if (priv->old_duplex != phydev->duplex) {
1418 changed = 1;
1419 priv->old_duplex = phydev->duplex;
1420 }
1421
Florian Fainelli44a45242017-01-20 11:08:27 -08001422 if (priv->is_lite)
1423 goto out;
1424
Florian Fainelli80105be2014-04-24 18:08:57 -07001425 switch (phydev->speed) {
1426 case SPEED_2500:
1427 cmd_bits = CMD_SPEED_2500;
1428 break;
1429 case SPEED_1000:
1430 cmd_bits = CMD_SPEED_1000;
1431 break;
1432 case SPEED_100:
1433 cmd_bits = CMD_SPEED_100;
1434 break;
1435 case SPEED_10:
1436 cmd_bits = CMD_SPEED_10;
1437 break;
1438 default:
1439 break;
1440 }
1441 cmd_bits <<= CMD_SPEED_SHIFT;
1442
1443 if (phydev->duplex == DUPLEX_HALF)
1444 cmd_bits |= CMD_HD_EN;
1445
1446 if (priv->old_pause != phydev->pause) {
1447 changed = 1;
1448 priv->old_pause = phydev->pause;
1449 }
1450
1451 if (!phydev->pause)
1452 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1453
Florian Fainelli4a804c02014-09-02 11:17:07 -07001454 if (!changed)
1455 return;
1456
1457 if (phydev->link) {
Florian Fainellid5e32cc2014-05-14 19:32:13 -07001458 reg = umac_readl(priv, UMAC_CMD);
1459 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
Florian Fainelli80105be2014-04-24 18:08:57 -07001460 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1461 CMD_TX_PAUSE_IGNORE);
Florian Fainellid5e32cc2014-05-14 19:32:13 -07001462 reg |= cmd_bits;
1463 umac_writel(priv, reg, UMAC_CMD);
Florian Fainellid5e32cc2014-05-14 19:32:13 -07001464 }
Florian Fainelli44a45242017-01-20 11:08:27 -08001465out:
1466 if (changed)
1467 phy_print_status(phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001468}
1469
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001470static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
Florian Fainellib6e0e872018-03-22 18:19:32 -07001471 void (*cb)(struct work_struct *work))
1472{
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001473 struct bcm_sysport_net_dim *dim = &priv->dim;
1474
Florian Fainellib6e0e872018-03-22 18:19:32 -07001475 INIT_WORK(&dim->dim.work, cb);
Tal Gilboac002bd52018-11-05 12:07:52 +02001476 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
Florian Fainellib6e0e872018-03-22 18:19:32 -07001477 dim->event_ctr = 0;
1478 dim->packets = 0;
1479 dim->bytes = 0;
1480}
1481
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001482static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1483{
1484 struct bcm_sysport_net_dim *dim = &priv->dim;
Tal Gilboa8960b382019-01-31 16:44:48 +02001485 struct dim_cq_moder moder;
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001486 u32 usecs, pkts;
1487
1488 usecs = priv->rx_coalesce_usecs;
1489 pkts = priv->rx_max_coalesced_frames;
1490
1491 /* If DIM was enabled, re-apply default parameters */
1492 if (dim->use_dim) {
Tal Gilboa026a8072018-04-24 13:36:01 +03001493 moder = net_dim_get_def_rx_moderation(dim->dim.mode);
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001494 usecs = moder.usec;
1495 pkts = moder.pkts;
1496 }
1497
1498 bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1499}
1500
Florian Fainelli80105be2014-04-24 18:08:57 -07001501static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1502 unsigned int index)
1503{
1504 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
Florian Fainelli80105be2014-04-24 18:08:57 -07001505 size_t size;
Florian Fainelli80105be2014-04-24 18:08:57 -07001506 u32 reg;
1507
1508	/* Simple descriptor partitioning for now */
1509 size = 256;
1510
Florian Fainelli40a8a312014-07-09 17:36:47 -07001511 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
Florian Fainelli80105be2014-04-24 18:08:57 -07001512 if (!ring->cbs) {
1513 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1514 return -ENOMEM;
1515 }
1516
1517 /* Initialize SW view of the ring */
1518 spin_lock_init(&ring->lock);
1519 ring->priv = priv;
Eric Dumazetd64b5e82015-11-18 06:31:00 -08001520 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
Florian Fainelli80105be2014-04-24 18:08:57 -07001521 ring->index = index;
1522 ring->size = size;
Florian Fainelli484d8022018-03-13 14:45:07 -07001523 ring->clean_index = 0;
Florian Fainelli80105be2014-04-24 18:08:57 -07001524 ring->alloc_size = ring->size;
Florian Fainelli80105be2014-04-24 18:08:57 -07001525 ring->desc_count = ring->size;
1526 ring->curr_desc = 0;
1527
1528 /* Initialize HW ring */
1529 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1530 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1531 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1532 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
Florian Fainellid1565762017-10-11 10:57:50 -07001533
1534 /* Configure QID and port mapping */
1535 reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1536 reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
Florian Fainelli3ded76a2017-11-01 11:29:47 -07001537 if (ring->inspect) {
1538 reg |= ring->switch_queue & RING_QID_MASK;
1539 reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1540 } else {
1541 reg |= RING_IGNORE_STATUS;
1542 }
Florian Fainellid1565762017-10-11 10:57:50 -07001543 tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
Florian Fainelli6e9fdb62020-07-06 14:29:39 -07001544 reg = 0;
1545 /* Adjust the packet size calculations if SYSTEMPORT is responsible
1546 * for HW insertion of VLAN tags
1547 */
1548 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1549 reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
1550 tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
Florian Fainelli80105be2014-04-24 18:08:57 -07001551
Florian Fainelli723934f2017-10-11 10:57:52 -07001552 /* Enable ACB algorithm 2 */
1553 reg = tdma_readl(priv, TDMA_CONTROL);
1554 reg |= tdma_control_bit(priv, ACB_ALGO);
1555 tdma_writel(priv, reg, TDMA_CONTROL);
1556
Florian Fainelli487234c2017-09-01 17:32:34 -07001557 /* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1558 * with the original definition of ACB_ALGO
1559 */
1560 reg = tdma_readl(priv, TDMA_CONTROL);
1561 if (priv->is_lite)
1562 reg &= ~BIT(TSB_SWAP1);
1563 /* Set a correct TSB format based on host endian */
1564 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1565 reg |= tdma_control_bit(priv, TSB_SWAP0);
1566 else
1567 reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1568 tdma_writel(priv, reg, TDMA_CONTROL);
1569
Florian Fainelli80105be2014-04-24 18:08:57 -07001570	/* Program the number of descriptors as MAX_THRESHOLD, with a
1571	 * single descriptor as the hysteresis trigger
1572 */
1573 tdma_writel(priv, ring->size |
1574 1 << RING_HYST_THRESH_SHIFT,
1575 TDMA_DESC_RING_MAX_HYST(index));
1576
1577 /* Enable the ring queue in the arbiter */
1578 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1579 reg |= (1 << index);
1580 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1581
1582 napi_enable(&ring->napi);
1583
1584 netif_dbg(priv, hw, priv->netdev,
Florian Fainelli7e6e1852019-04-22 09:46:44 -07001585 "TDMA cfg, size=%d, switch q=%d,port=%d\n",
1586 ring->size, ring->switch_queue,
Florian Fainellid1565762017-10-11 10:57:50 -07001587 ring->switch_port);
Florian Fainelli80105be2014-04-24 18:08:57 -07001588
1589 return 0;
1590}
1591
1592static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001593 unsigned int index)
Florian Fainelli80105be2014-04-24 18:08:57 -07001594{
1595 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
Florian Fainelli80105be2014-04-24 18:08:57 -07001596 u32 reg;
1597
1598 /* Caller should stop the TDMA engine */
1599 reg = tdma_readl(priv, TDMA_STATUS);
1600 if (!(reg & TDMA_DISABLED))
1601 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1602
Florian Fainelli914adb52014-10-31 15:51:35 -07001603 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1604 * fail, so by checking this pointer we know whether the TX ring was
1605 * fully initialized or not.
1606 */
1607 if (!ring->cbs)
1608 return;
1609
Florian Fainelli80105be2014-04-24 18:08:57 -07001610 napi_disable(&ring->napi);
1611 netif_napi_del(&ring->napi);
1612
Florian Fainelli148d3d02017-01-12 12:09:09 -08001613 bcm_sysport_tx_clean(priv, ring);
Florian Fainelli80105be2014-04-24 18:08:57 -07001614
1615 kfree(ring->cbs);
1616 ring->cbs = NULL;
Florian Fainelli80105be2014-04-24 18:08:57 -07001617 ring->size = 0;
1618 ring->alloc_size = 0;
1619
1620 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1621}
1622
1623/* RDMA helper */
1624static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001625 unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001626{
1627 unsigned int timeout = 1000;
1628 u32 reg;
1629
1630 reg = rdma_readl(priv, RDMA_CONTROL);
1631 if (enable)
1632 reg |= RDMA_EN;
1633 else
1634 reg &= ~RDMA_EN;
1635 rdma_writel(priv, reg, RDMA_CONTROL);
1636
1637	/* Poll for RDMA disabling completion */
1638 do {
1639 reg = rdma_readl(priv, RDMA_STATUS);
1640 if (!!(reg & RDMA_DISABLED) == !enable)
1641 return 0;
1642 usleep_range(1000, 2000);
1643 } while (timeout-- > 0);
1644
1645 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1646
1647 return -ETIMEDOUT;
1648}
1649
1650/* TDMA helper */
1651static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001652 unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001653{
1654 unsigned int timeout = 1000;
1655 u32 reg;
1656
1657 reg = tdma_readl(priv, TDMA_CONTROL);
1658 if (enable)
Florian Fainelli44a45242017-01-20 11:08:27 -08001659 reg |= tdma_control_bit(priv, TDMA_EN);
Florian Fainelli80105be2014-04-24 18:08:57 -07001660 else
Florian Fainelli44a45242017-01-20 11:08:27 -08001661 reg &= ~tdma_control_bit(priv, TDMA_EN);
Florian Fainelli80105be2014-04-24 18:08:57 -07001662 tdma_writel(priv, reg, TDMA_CONTROL);
1663
1664	/* Poll for TDMA disabling completion */
1665 do {
1666 reg = tdma_readl(priv, TDMA_STATUS);
1667 if (!!(reg & TDMA_DISABLED) == !enable)
1668 return 0;
1669
1670 usleep_range(1000, 2000);
1671 } while (timeout-- > 0);
1672
1673 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1674
1675 return -ETIMEDOUT;
1676}
1677
1678static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1679{
Florian Fainellibaf387a2015-05-28 15:24:42 -07001680 struct bcm_sysport_cb *cb;
Florian Fainelli80105be2014-04-24 18:08:57 -07001681 u32 reg;
1682 int ret;
Florian Fainellibaf387a2015-05-28 15:24:42 -07001683 int i;
Florian Fainelli80105be2014-04-24 18:08:57 -07001684
1685 /* Initialize SW view of the RX ring */
Florian Fainelli44a45242017-01-20 11:08:27 -08001686 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
Florian Fainelli80105be2014-04-24 18:08:57 -07001687 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
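	/* RX descriptors live in the on-chip RDMA window rather than in
	 * DRAM, so they are pointed at here, not allocated.
	 */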
Florian Fainelli80105be2014-04-24 18:08:57 -07001688 priv->rx_c_index = 0;
1689 priv->rx_read_ptr = 0;
Florian Fainelli40a8a312014-07-09 17:36:47 -07001690 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1691 GFP_KERNEL);
Florian Fainelli80105be2014-04-24 18:08:57 -07001692 if (!priv->rx_cbs) {
1693 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1694 return -ENOMEM;
1695 }
1696
Florian Fainellibaf387a2015-05-28 15:24:42 -07001697 for (i = 0; i < priv->num_rx_bds; i++) {
1698 cb = priv->rx_cbs + i;
1699 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1700 }
1701
Florian Fainelli80105be2014-04-24 18:08:57 -07001702 ret = bcm_sysport_alloc_rx_bufs(priv);
1703 if (ret) {
1704 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1705 return ret;
1706 }
1707
1708 /* Initialize HW, ensure RDMA is disabled */
1709 reg = rdma_readl(priv, RDMA_STATUS);
1710 if (!(reg & RDMA_DISABLED))
1711 rdma_enable_set(priv, 0);
1712
1713 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1714 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1715 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1716 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1717 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1718 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1719 /* Operate the queue in ring mode */
1720 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1721 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1722 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
Florian Fainelli44a45242017-01-20 11:08:27 -08001723 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
Florian Fainelli80105be2014-04-24 18:08:57 -07001724
Florian Fainelli80105be2014-04-24 18:08:57 -07001725 netif_dbg(priv, hw, priv->netdev,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001726 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1727 priv->num_rx_bds, priv->rx_bds);
Florian Fainelli80105be2014-04-24 18:08:57 -07001728
1729 return 0;
1730}
1731
1732static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1733{
1734 struct bcm_sysport_cb *cb;
1735 unsigned int i;
1736 u32 reg;
1737
1738 /* Caller should ensure RDMA is disabled */
1739 reg = rdma_readl(priv, RDMA_STATUS);
1740 if (!(reg & RDMA_DISABLED))
1741 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1742
1743 for (i = 0; i < priv->num_rx_bds; i++) {
1744 cb = &priv->rx_cbs[i];
1745 if (dma_unmap_addr(cb, dma_addr))
1746 dma_unmap_single(&priv->pdev->dev,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001747 dma_unmap_addr(cb, dma_addr),
1748 RX_BUF_LENGTH, DMA_FROM_DEVICE);
Florian Fainelli80105be2014-04-24 18:08:57 -07001749 bcm_sysport_free_cb(cb);
1750 }
1751
1752 kfree(priv->rx_cbs);
1753 priv->rx_cbs = NULL;
1754
1755 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1756}
1757
1758static void bcm_sysport_set_rx_mode(struct net_device *dev)
1759{
1760 struct bcm_sysport_priv *priv = netdev_priv(dev);
1761 u32 reg;
1762
Florian Fainelli44a45242017-01-20 11:08:27 -08001763 if (priv->is_lite)
1764 return;
1765
Florian Fainelli80105be2014-04-24 18:08:57 -07001766 reg = umac_readl(priv, UMAC_CMD);
1767 if (dev->flags & IFF_PROMISC)
1768 reg |= CMD_PROMISC;
1769 else
1770 reg &= ~CMD_PROMISC;
1771 umac_writel(priv, reg, UMAC_CMD);
1772
1773 /* No support for ALLMULTI */
1774 if (dev->flags & IFF_ALLMULTI)
1775 return;
1776}
1777
1778static inline void umac_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001779 u32 mask, unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001780{
1781 u32 reg;
1782
Florian Fainelli44a45242017-01-20 11:08:27 -08001783 if (!priv->is_lite) {
1784 reg = umac_readl(priv, UMAC_CMD);
1785 if (enable)
1786 reg |= mask;
1787 else
1788 reg &= ~mask;
1789 umac_writel(priv, reg, UMAC_CMD);
1790 } else {
1791 reg = gib_readl(priv, GIB_CONTROL);
1792 if (enable)
1793 reg |= mask;
1794 else
1795 reg &= ~mask;
1796 gib_writel(priv, reg, GIB_CONTROL);
1797 }
Florian Fainelli00b91c62014-05-15 14:33:53 -07001798
1799	/* UniMAC stops on a packet boundary; wait for a full-sized packet
1800 * to be processed (1 msec).
1801 */
1802 if (enable == 0)
1803 usleep_range(1000, 2000);
Florian Fainelli80105be2014-04-24 18:08:57 -07001804}
1805
Florian Fainelli412bce82014-06-26 10:06:45 -07001806static inline void umac_reset(struct bcm_sysport_priv *priv)
Florian Fainelli80105be2014-04-24 18:08:57 -07001807{
Florian Fainelli80105be2014-04-24 18:08:57 -07001808 u32 reg;
Florian Fainelli80105be2014-04-24 18:08:57 -07001809
Florian Fainelli44a45242017-01-20 11:08:27 -08001810 if (priv->is_lite)
1811 return;
1812
Florian Fainelli412bce82014-06-26 10:06:45 -07001813 reg = umac_readl(priv, UMAC_CMD);
1814 reg |= CMD_SW_RESET;
1815 umac_writel(priv, reg, UMAC_CMD);
1816 udelay(10);
1817 reg = umac_readl(priv, UMAC_CMD);
1818 reg &= ~CMD_SW_RESET;
1819 umac_writel(priv, reg, UMAC_CMD);
Florian Fainelli80105be2014-04-24 18:08:57 -07001820}
1821
1822static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
Jakub Kicinski76660752021-10-14 07:24:31 -07001823 const unsigned char *addr)
Florian Fainelli80105be2014-04-24 18:08:57 -07001824{
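	/* The station address is split across two registers: MAC0 carries
	 * the first four octets (most significant first), MAC1 the last two.
	 */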
Florian Fainelli44a45242017-01-20 11:08:27 -08001825 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1826 addr[3];
1827 u32 mac1 = (addr[4] << 8) | addr[5];
1828
1829 if (!priv->is_lite) {
1830 umac_writel(priv, mac0, UMAC_MAC0);
1831 umac_writel(priv, mac1, UMAC_MAC1);
1832 } else {
1833 gib_writel(priv, mac0, GIB_MAC0);
1834 gib_writel(priv, mac1, GIB_MAC1);
1835 }
Florian Fainelli80105be2014-04-24 18:08:57 -07001836}
1837
1838static void topctrl_flush(struct bcm_sysport_priv *priv)
1839{
1840 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1841 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1842 mdelay(1);
1843 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1844 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1845}
1846
Florian Fainellifb3b5962014-12-08 15:59:18 -08001847static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1848{
1849 struct bcm_sysport_priv *priv = netdev_priv(dev);
1850 struct sockaddr *addr = p;
1851
1852 if (!is_valid_ether_addr(addr->sa_data))
1853 return -EINVAL;
1854
Jakub Kicinskia05e4c02021-10-04 09:05:21 -07001855 eth_hw_addr_set(dev, addr->sa_data);
Florian Fainellifb3b5962014-12-08 15:59:18 -08001856
1857 /* interface is disabled, changes to MAC will be reflected on next
1858 * open call
1859 */
1860 if (!netif_running(dev))
1861 return 0;
1862
1863 umac_set_hw_addr(priv, dev->dev_addr);
1864
1865 return 0;
1866}
1867
kiki good10377ba2017-08-04 00:07:45 +01001868static void bcm_sysport_get_stats64(struct net_device *dev,
1869 struct rtnl_link_stats64 *stats)
Florian Fainelli30defeb2017-03-23 10:36:46 -07001870{
1871 struct bcm_sysport_priv *priv = netdev_priv(dev);
kiki good10377ba2017-08-04 00:07:45 +01001872 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
kiki good10377ba2017-08-04 00:07:45 +01001873 unsigned int start;
Florian Fainelli30defeb2017-03-23 10:36:46 -07001874
kiki good10377ba2017-08-04 00:07:45 +01001875 netdev_stats_to_stats64(stats, &dev->stats);
1876
Florian Fainelli8ecb1a22017-09-18 16:31:30 -07001877 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1878 &stats->tx_packets);
kiki good10377ba2017-08-04 00:07:45 +01001879
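	/* Snapshot the RX counters inside the u64_stats sequence so that
	 * 64-bit reads are consistent on 32-bit hosts.
	 */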
1880 do {
1881 start = u64_stats_fetch_begin_irq(&priv->syncp);
1882 stats->rx_packets = stats64->rx_packets;
1883 stats->rx_bytes = stats64->rx_bytes;
1884 } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
Florian Fainelli30defeb2017-03-23 10:36:46 -07001885}
1886
Florian Fainellib02e6d92014-07-01 21:08:37 -07001887static void bcm_sysport_netif_start(struct net_device *dev)
1888{
1889 struct bcm_sysport_priv *priv = netdev_priv(dev);
1890
1891 /* Enable NAPI */
Florian Fainellia8cdfbdf2018-03-28 15:15:37 -07001892 bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1893 bcm_sysport_init_rx_coalesce(priv);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001894 napi_enable(&priv->napi);
1895
Florian Fainelli8edf0042014-10-28 11:12:00 -07001896 /* Enable RX interrupt and TX ring full interrupt */
1897 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1898
Philippe Reynes715a0222016-06-19 20:39:08 +02001899 phy_start(dev->phydev);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001900
Florian Fainelli44a45242017-01-20 11:08:27 -08001901 /* Enable TX interrupts for the TXQs */
1902 if (!priv->is_lite)
1903 intrl2_1_mask_clear(priv, 0xffffffff);
1904 else
1905 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001906}
1907
Florian Fainelli40755a02014-07-01 21:08:38 -07001908static void rbuf_init(struct bcm_sysport_priv *priv)
1909{
1910 u32 reg;
1911
1912 reg = rbuf_readl(priv, RBUF_CONTROL);
1913 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
Florian Fainelli44a45242017-01-20 11:08:27 -08001914 /* Set a correct RSB format on SYSTEMPORT Lite */
Florian Fainelli389a06b2017-08-29 13:35:17 -07001915 if (priv->is_lite)
Florian Fainelli44a45242017-01-20 11:08:27 -08001916 reg &= ~RBUF_RSB_SWAP1;
Florian Fainelli389a06b2017-08-29 13:35:17 -07001917
1918 /* Set a correct RSB format based on host endian */
1919 if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
Florian Fainelli44a45242017-01-20 11:08:27 -08001920 reg |= RBUF_RSB_SWAP0;
Florian Fainelli389a06b2017-08-29 13:35:17 -07001921 else
1922 reg &= ~RBUF_RSB_SWAP0;
Florian Fainelli40755a02014-07-01 21:08:38 -07001923 rbuf_writel(priv, reg, RBUF_CONTROL);
1924}
1925
Florian Fainelli44a45242017-01-20 11:08:27 -08001926static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1927{
1928 intrl2_0_mask_set(priv, 0xffffffff);
1929 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1930 if (!priv->is_lite) {
1931 intrl2_1_mask_set(priv, 0xffffffff);
1932 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1933 }
1934}
1935
1936static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1937{
Florian Fainelli93824c82017-11-02 16:08:40 -07001938 u32 reg;
Florian Fainelli44a45242017-01-20 11:08:27 -08001939
Florian Fainelli93824c82017-11-02 16:08:40 -07001940 reg = gib_readl(priv, GIB_CONTROL);
1941 /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
Florian Fainelli44a45242017-01-20 11:08:27 -08001942 if (netdev_uses_dsa(priv->netdev)) {
Florian Fainelli44a45242017-01-20 11:08:27 -08001943 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1944 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
Florian Fainelli44a45242017-01-20 11:08:27 -08001945 }
Florian Fainelli93824c82017-11-02 16:08:40 -07001946 reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
1947 reg |= 12 << GIB_IPG_LEN_SHIFT;
1948 gib_writel(priv, reg, GIB_CONTROL);
Florian Fainelli44a45242017-01-20 11:08:27 -08001949}
1950
Florian Fainelli80105be2014-04-24 18:08:57 -07001951static int bcm_sysport_open(struct net_device *dev)
1952{
1953 struct bcm_sysport_priv *priv = netdev_priv(dev);
Philippe Reynes715a0222016-06-19 20:39:08 +02001954 struct phy_device *phydev;
Florian Fainelli80105be2014-04-24 18:08:57 -07001955 unsigned int i;
Florian Fainelli80105be2014-04-24 18:08:57 -07001956 int ret;
1957
Florian Fainelli31bc72d2020-09-01 14:43:47 -07001958 clk_prepare_enable(priv->clk);
1959
Florian Fainelli80105be2014-04-24 18:08:57 -07001960 /* Reset UniMAC */
Florian Fainelli412bce82014-06-26 10:06:45 -07001961 umac_reset(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001962
1963 /* Flush TX and RX FIFOs at TOPCTRL level */
1964 topctrl_flush(priv);
1965
1966 /* Disable the UniMAC RX/TX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07001967 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07001968
1969 /* Enable RBUF 2bytes alignment and Receive Status Block */
Florian Fainelli40755a02014-07-01 21:08:38 -07001970 rbuf_init(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001971
1972 /* Set maximum frame length */
Florian Fainelli44a45242017-01-20 11:08:27 -08001973 if (!priv->is_lite)
1974 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1975 else
1976 gib_set_pad_extension(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001977
Florian Fainelli297357d2018-09-27 15:36:11 -07001978 /* Apply features again in case we changed them while interface was
1979 * down
1980 */
1981 bcm_sysport_set_features(dev, dev->features);
1982
Florian Fainelli80105be2014-04-24 18:08:57 -07001983 /* Set MAC address */
1984 umac_set_hw_addr(priv, dev->dev_addr);
1985
Philippe Reynes715a0222016-06-19 20:39:08 +02001986 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1987 0, priv->phy_interface);
1988 if (!phydev) {
Florian Fainelli80105be2014-04-24 18:08:57 -07001989 netdev_err(dev, "could not attach to PHY\n");
Florian Fainelli31bc72d2020-09-01 14:43:47 -07001990 ret = -ENODEV;
1991 goto out_clk_disable;
Florian Fainelli80105be2014-04-24 18:08:57 -07001992 }
1993
1994 /* Reset house keeping link status */
1995 priv->old_duplex = -1;
1996 priv->old_link = -1;
1997 priv->old_pause = -1;
1998
1999 /* mask all interrupts and request them */
Florian Fainelli44a45242017-01-20 11:08:27 -08002000 bcm_sysport_mask_all_intrs(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07002001
2002 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
2003 if (ret) {
2004 netdev_err(dev, "failed to request RX interrupt\n");
2005 goto out_phy_disconnect;
2006 }
2007
Florian Fainelli44a45242017-01-20 11:08:27 -08002008 if (!priv->is_lite) {
2009 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2010 dev->name, dev);
2011 if (ret) {
2012 netdev_err(dev, "failed to request TX interrupt\n");
2013 goto out_free_irq0;
2014 }
Florian Fainelli80105be2014-04-24 18:08:57 -07002015 }
2016
2017 /* Initialize both hardware and software ring */
Florian Fainelli8b8e6e72021-12-15 12:24:49 -08002018 spin_lock_init(&priv->desc_lock);
Florian Fainelli80105be2014-04-24 18:08:57 -07002019 for (i = 0; i < dev->num_tx_queues; i++) {
2020 ret = bcm_sysport_init_tx_ring(priv, i);
2021 if (ret) {
2022 netdev_err(dev, "failed to initialize TX ring %d\n",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07002023 i);
Florian Fainelli80105be2014-04-24 18:08:57 -07002024 goto out_free_tx_ring;
2025 }
2026 }
2027
2028 /* Initialize linked-list */
2029 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
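	/* TDMA_LL_RAM_INIT_BUSY requests hardware initialization of the
	 * descriptor linked-list RAM ahead of any transmit activity.
	 */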
2030
2031 /* Initialize RX ring */
2032 ret = bcm_sysport_init_rx_ring(priv);
2033 if (ret) {
2034 netdev_err(dev, "failed to initialize RX ring\n");
2035 goto out_free_rx_ring;
2036 }
2037
2038 /* Turn on RDMA */
2039 ret = rdma_enable_set(priv, 1);
2040 if (ret)
2041 goto out_free_rx_ring;
2042
Florian Fainelli80105be2014-04-24 18:08:57 -07002043 /* Turn on TDMA */
2044 ret = tdma_enable_set(priv, 1);
2045 if (ret)
2046 goto out_clear_rx_int;
2047
Florian Fainelli80105be2014-04-24 18:08:57 -07002048 /* Turn on UniMAC TX/RX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07002049 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
Florian Fainelli80105be2014-04-24 18:08:57 -07002050
Florian Fainellib02e6d92014-07-01 21:08:37 -07002051 bcm_sysport_netif_start(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002052
Florian Fainelli7cb6a2a2018-11-01 15:55:38 -07002053 netif_tx_start_all_queues(dev);
2054
Florian Fainelli80105be2014-04-24 18:08:57 -07002055 return 0;
2056
2057out_clear_rx_int:
2058 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2059out_free_rx_ring:
2060 bcm_sysport_fini_rx_ring(priv);
2061out_free_tx_ring:
2062 for (i = 0; i < dev->num_tx_queues; i++)
2063 bcm_sysport_fini_tx_ring(priv, i);
Florian Fainelli44a45242017-01-20 11:08:27 -08002064 if (!priv->is_lite)
2065 free_irq(priv->irq1, dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002066out_free_irq0:
2067 free_irq(priv->irq0, dev);
2068out_phy_disconnect:
Philippe Reynes715a0222016-06-19 20:39:08 +02002069 phy_disconnect(phydev);
Florian Fainelli31bc72d2020-09-01 14:43:47 -07002070out_clk_disable:
2071 clk_disable_unprepare(priv->clk);
Florian Fainelli80105be2014-04-24 18:08:57 -07002072 return ret;
2073}
2074
Florian Fainellib02e6d92014-07-01 21:08:37 -07002075static void bcm_sysport_netif_stop(struct net_device *dev)
Florian Fainelli80105be2014-04-24 18:08:57 -07002076{
2077 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002078
2079 /* stop all software from updating hardware */
Florian Fainelli7cb6a2a2018-11-01 15:55:38 -07002080 netif_tx_disable(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002081 napi_disable(&priv->napi);
Florian Fainellib6e0e872018-03-22 18:19:32 -07002082 cancel_work_sync(&priv->dim.dim.work);
Philippe Reynes715a0222016-06-19 20:39:08 +02002083 phy_stop(dev->phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002084
2085 /* mask all interrupts */
Florian Fainelli44a45242017-01-20 11:08:27 -08002086 bcm_sysport_mask_all_intrs(priv);
Florian Fainellib02e6d92014-07-01 21:08:37 -07002087}
2088
2089static int bcm_sysport_stop(struct net_device *dev)
2090{
2091 struct bcm_sysport_priv *priv = netdev_priv(dev);
2092 unsigned int i;
2093 int ret;
2094
2095 bcm_sysport_netif_stop(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002096
2097 /* Disable UniMAC RX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07002098 umac_enable_set(priv, CMD_RX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07002099
2100 ret = tdma_enable_set(priv, 0);
2101 if (ret) {
2102		netdev_err(dev, "timeout disabling TDMA\n");
2103 return ret;
2104 }
2105
2106	/* Wait for a maximum-size packet to be drained */
2107 usleep_range(2000, 3000);
2108
2109 ret = rdma_enable_set(priv, 0);
2110 if (ret) {
2111		netdev_err(dev, "timeout disabling RDMA\n");
2112 return ret;
2113 }
2114
2115 /* Disable UniMAC TX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07002116 umac_enable_set(priv, CMD_TX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07002117
2118 /* Free RX/TX rings SW structures */
2119 for (i = 0; i < dev->num_tx_queues; i++)
2120 bcm_sysport_fini_tx_ring(priv, i);
2121 bcm_sysport_fini_rx_ring(priv);
2122
2123 free_irq(priv->irq0, dev);
Florian Fainelli44a45242017-01-20 11:08:27 -08002124 if (!priv->is_lite)
2125 free_irq(priv->irq1, dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002126
2127 /* Disconnect from PHY */
Philippe Reynes715a0222016-06-19 20:39:08 +02002128 phy_disconnect(dev->phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002129
Florian Fainelli31bc72d2020-09-01 14:43:47 -07002130 clk_disable_unprepare(priv->clk);
2131
Florian Fainelli80105be2014-04-24 18:08:57 -07002132 return 0;
2133}
2134
Florian Fainellibb9051a22018-08-07 10:50:23 -07002135static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2136 u64 location)
2137{
2138 unsigned int index;
2139 u32 reg;
2140
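	/* Scan the in-use filter bitmap and match on the classification ID
	 * programmed into each RXCHK Broadcom tag register.
	 */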
2141 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2142 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2143 reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
2144 reg &= RXCHK_BRCM_TAG_CID_MASK;
2145 if (reg == location)
2146 return index;
2147 }
2148
2149 return -EINVAL;
2150}
2151
2152static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2153 struct ethtool_rxnfc *nfc)
2154{
2155 int index;
2156
2157 /* This is not a rule that we know about */
2158 index = bcm_sysport_rule_find(priv, nfc->fs.location);
2159 if (index < 0)
2160 return -EOPNOTSUPP;
2161
2162 nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
2163
2164 return 0;
2165}
2166
2167static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2168 struct ethtool_rxnfc *nfc)
2169{
2170 unsigned int index;
2171 u32 reg;
2172
2173 /* We cannot match locations greater than what the classification ID
2174 * permits (256 entries)
2175 */
2176 if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
2177 return -E2BIG;
2178
2179 /* We cannot support flows that are not destined for a wake-up */
2180 if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
2181 return -EOPNOTSUPP;
2182
2183 /* All filters are already in use, we cannot match more rules */
2184 if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
2185 RXCHK_BRCM_TAG_MAX)
2186 return -ENOSPC;
2187
2188 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
Colin Ian Kingc0368592020-03-12 15:04:30 +00002189 if (index >= RXCHK_BRCM_TAG_MAX)
Florian Fainellibb9051a22018-08-07 10:50:23 -07002190 return -ENOSPC;
2191
2192 /* Location is the classification ID, and index is the position
2193 * within one of our 8 possible filters to be programmed
2194 */
2195 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2196 reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
2197 reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
2198 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2199 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2200
Florian Fainelli80f8dea2018-11-06 12:58:41 -08002201 priv->filters_loc[index] = nfc->fs.location;
Florian Fainellibb9051a22018-08-07 10:50:23 -07002202 set_bit(index, priv->filters);
2203
2204 return 0;
2205}
2206
2207static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2208 u64 location)
2209{
2210 int index;
2211
2212 /* This is not a rule that we know about */
2213 index = bcm_sysport_rule_find(priv, location);
2214 if (index < 0)
2215 return -EOPNOTSUPP;
2216
2217	/* No need to disable this filter if it was enabled; this will
2218	 * be taken care of at suspend time by bcm_sysport_suspend_to_wol
2219 */
2220 clear_bit(index, priv->filters);
Florian Fainelli80f8dea2018-11-06 12:58:41 -08002221 priv->filters_loc[index] = 0;
Florian Fainellibb9051a22018-08-07 10:50:23 -07002222
2223 return 0;
2224}
2225
2226static int bcm_sysport_get_rxnfc(struct net_device *dev,
2227 struct ethtool_rxnfc *nfc, u32 *rule_locs)
2228{
2229 struct bcm_sysport_priv *priv = netdev_priv(dev);
2230 int ret = -EOPNOTSUPP;
2231
2232 switch (nfc->cmd) {
2233 case ETHTOOL_GRXCLSRULE:
2234 ret = bcm_sysport_rule_get(priv, nfc);
2235 break;
2236 default:
2237 break;
2238 }
2239
2240 return ret;
2241}
2242
2243static int bcm_sysport_set_rxnfc(struct net_device *dev,
2244 struct ethtool_rxnfc *nfc)
2245{
2246 struct bcm_sysport_priv *priv = netdev_priv(dev);
2247 int ret = -EOPNOTSUPP;
2248
2249 switch (nfc->cmd) {
2250 case ETHTOOL_SRXCLSRLINS:
2251 ret = bcm_sysport_rule_set(priv, nfc);
2252 break;
2253 case ETHTOOL_SRXCLSRLDEL:
2254 ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2255 break;
2256 default:
2257 break;
2258 }
2259
2260 return ret;
2261}
2262
Julia Lawallc1ab0e92016-08-31 09:30:48 +02002263static const struct ethtool_ops bcm_sysport_ethtool_ops = {
Jakub Kicinskif4a76615f2020-03-09 19:15:00 -07002264 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2265 ETHTOOL_COALESCE_MAX_FRAMES |
2266 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
Florian Fainelli80105be2014-04-24 18:08:57 -07002267 .get_drvinfo = bcm_sysport_get_drvinfo,
2268 .get_msglevel = bcm_sysport_get_msglvl,
2269 .set_msglevel = bcm_sysport_set_msglvl,
2270 .get_link = ethtool_op_get_link,
2271 .get_strings = bcm_sysport_get_strings,
2272 .get_ethtool_stats = bcm_sysport_get_stats,
2273 .get_sset_count = bcm_sysport_get_sset_count,
Florian Fainelli83e82f42014-07-01 21:08:40 -07002274 .get_wol = bcm_sysport_get_wol,
2275 .set_wol = bcm_sysport_set_wol,
Florian Fainellib1a15e82015-05-11 15:12:41 -07002276 .get_coalesce = bcm_sysport_get_coalesce,
2277 .set_coalesce = bcm_sysport_set_coalesce,
Philippe Reynes697666e2016-06-19 20:39:09 +02002278 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2279 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Florian Fainellibb9051a22018-08-07 10:50:23 -07002280 .get_rxnfc = bcm_sysport_get_rxnfc,
2281 .set_rxnfc = bcm_sysport_set_rxnfc,
Florian Fainelli80105be2014-04-24 18:08:57 -07002282};
2283
Florian Fainellid1565762017-10-11 10:57:50 -07002284static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01002285 struct net_device *sb_dev)
Florian Fainellid1565762017-10-11 10:57:50 -07002286{
2287 struct bcm_sysport_priv *priv = netdev_priv(dev);
2288 u16 queue = skb_get_queue_mapping(skb);
2289 struct bcm_sysport_tx_ring *tx_ring;
2290 unsigned int q, port;
2291
2292 if (!netdev_uses_dsa(dev))
Paolo Abenia350ecc2019-03-20 11:02:06 +01002293 return netdev_pick_tx(dev, skb, NULL);
Florian Fainellid1565762017-10-11 10:57:50 -07002294
2295 /* DSA tagging layer will have configured the correct queue */
2296 q = BRCM_TAG_GET_QUEUE(queue);
2297 port = BRCM_TAG_GET_PORT(queue);
2298 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2299
Florian Fainellie83b1712017-10-20 15:59:30 -07002300 if (unlikely(!tx_ring))
Paolo Abenia350ecc2019-03-20 11:02:06 +01002301 return netdev_pick_tx(dev, skb, NULL);
Florian Fainellie83b1712017-10-20 15:59:30 -07002302
Florian Fainellid1565762017-10-11 10:57:50 -07002303 return tx_ring->index;
2304}
2305
Florian Fainellic0c21452017-10-25 18:01:05 -07002306static const struct net_device_ops bcm_sysport_netdev_ops = {
2307 .ndo_start_xmit = bcm_sysport_xmit,
2308 .ndo_tx_timeout = bcm_sysport_tx_timeout,
2309 .ndo_open = bcm_sysport_open,
2310 .ndo_stop = bcm_sysport_stop,
2311 .ndo_set_features = bcm_sysport_set_features,
2312 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
2313 .ndo_set_mac_address = bcm_sysport_change_mac,
2314#ifdef CONFIG_NET_POLL_CONTROLLER
2315 .ndo_poll_controller = bcm_sysport_poll_controller,
2316#endif
2317 .ndo_get_stats64 = bcm_sysport_get_stats64,
2318 .ndo_select_queue = bcm_sysport_select_queue,
2319};
2320
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002321static int bcm_sysport_map_queues(struct net_device *dev,
2322 struct net_device *slave_dev)
Florian Fainellid1565762017-10-11 10:57:50 -07002323{
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002324 struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2325 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainellid1565762017-10-11 10:57:50 -07002326 struct bcm_sysport_tx_ring *ring;
Florian Fainellid1565762017-10-11 10:57:50 -07002327 unsigned int num_tx_queues;
Florian Fainelli25c44072018-11-06 15:15:17 -08002328 unsigned int q, qp, port;
Florian Fainellid1565762017-10-11 10:57:50 -07002329
2330	/* We cannot set up queue inspection for switches that are not
2331	 * directly attached
2332 */
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002333 if (dp->ds->index)
Florian Fainellid1565762017-10-11 10:57:50 -07002334 return 0;
2335
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002336 port = dp->index;
Florian Fainellid1565762017-10-11 10:57:50 -07002337
2338 /* On SYSTEMPORT Lite we have twice as less queues, so we cannot do a
2339 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
2340 * per-port (slave_dev) network devices queue, we achieve just that.
2341 * This need to happen now before any slave network device is used such
2342 * it accurately reflects the number of real TX queues.
2343 */
2344 if (priv->is_lite)
2345 netif_set_real_num_tx_queues(slave_dev,
2346 slave_dev->num_tx_queues / 2);
Florian Fainelli1f3ccc3c2018-04-25 16:21:51 -07002347
Florian Fainellid1565762017-10-11 10:57:50 -07002348 num_tx_queues = slave_dev->real_num_tx_queues;
2349
2350 if (priv->per_port_num_tx_queues &&
2351 priv->per_port_num_tx_queues != num_tx_queues)
Colin Ian King14b7dc12018-04-27 20:09:25 +01002352 netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
Florian Fainellid1565762017-10-11 10:57:50 -07002353
2354 priv->per_port_num_tx_queues = num_tx_queues;
2355
Florian Fainelli25c44072018-11-06 15:15:17 -08002356 for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
2357 q++) {
2358 ring = &priv->tx_rings[q];
2359
2360 if (ring->inspect)
2361 continue;
Florian Fainellid1565762017-10-11 10:57:50 -07002362
2363		/* Just remember the mapping here; the actual programming is
2364		 * done in bcm_sysport_init_tx_ring
2365 */
Florian Fainelli25c44072018-11-06 15:15:17 -08002366 ring->switch_queue = qp;
Florian Fainellid1565762017-10-11 10:57:50 -07002367 ring->switch_port = port;
Florian Fainelli3ded76a2017-11-01 11:29:47 -07002368 ring->inspect = true;
Florian Fainelli5a9ef192020-01-16 13:08:58 -08002369 priv->ring_map[qp + port * num_tx_queues] = ring;
Florian Fainelli25c44072018-11-06 15:15:17 -08002370 qp++;
Florian Fainellid1565762017-10-11 10:57:50 -07002371 }
2372
2373 return 0;
2374}
2375
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002376static int bcm_sysport_unmap_queues(struct net_device *dev,
2377 struct net_device *slave_dev)
Florian Fainellida106a12018-11-06 15:15:18 -08002378{
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002379 struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2380 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainellida106a12018-11-06 15:15:18 -08002381 struct bcm_sysport_tx_ring *ring;
Florian Fainellida106a12018-11-06 15:15:18 -08002382 unsigned int num_tx_queues;
Florian Fainelli5a9ef192020-01-16 13:08:58 -08002383 unsigned int q, qp, port;
Florian Fainellida106a12018-11-06 15:15:18 -08002384
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002385 port = dp->index;
Florian Fainellida106a12018-11-06 15:15:18 -08002386
2387 num_tx_queues = slave_dev->real_num_tx_queues;
2388
2389 for (q = 0; q < dev->num_tx_queues; q++) {
2390 ring = &priv->tx_rings[q];
2391
2392 if (ring->switch_port != port)
2393 continue;
2394
2395 if (!ring->inspect)
2396 continue;
2397
2398 ring->inspect = false;
Florian Fainelli5a9ef192020-01-16 13:08:58 -08002399 qp = ring->switch_queue;
2400 priv->ring_map[qp + port * num_tx_queues] = NULL;
Florian Fainellida106a12018-11-06 15:15:18 -08002401 }
2402
2403 return 0;
2404}
2405
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002406static int bcm_sysport_netdevice_event(struct notifier_block *nb,
2407 unsigned long event, void *ptr)
Florian Fainellid1565762017-10-11 10:57:50 -07002408{
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002409 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2410 struct netdev_notifier_changeupper_info *info = ptr;
2411 struct bcm_sysport_priv *priv;
2412 int ret = 0;
2413
2414 priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2415 if (priv->netdev != dev)
2416 return NOTIFY_DONE;
Florian Fainellid1565762017-10-11 10:57:50 -07002417
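	/* Queue inspection is mapped in when a DSA slave device is linked on
	 * top of this master device, and unmapped when it is unlinked.
	 */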
Florian Fainellida106a12018-11-06 15:15:18 -08002418 switch (event) {
Vladimir Oltean1593cd42021-01-07 03:24:02 +02002419 case NETDEV_CHANGEUPPER:
2420 if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2421 return NOTIFY_DONE;
2422
2423 if (!dsa_slave_dev_check(info->upper_dev))
2424 return NOTIFY_DONE;
2425
2426 if (info->linking)
2427 ret = bcm_sysport_map_queues(dev, info->upper_dev);
2428 else
2429 ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
Florian Fainellida106a12018-11-06 15:15:18 -08002430 break;
2431 }
Florian Fainellid1565762017-10-11 10:57:50 -07002432
Florian Fainellida106a12018-11-06 15:15:18 -08002433 return notifier_from_errno(ret);
Florian Fainellid1565762017-10-11 10:57:50 -07002434}
2435
Florian Fainelli80105be2014-04-24 18:08:57 -07002436#define REV_FMT "v%2x.%02x"
2437
Florian Fainelli44a45242017-01-20 11:08:27 -08002438static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2439 [SYSTEMPORT] = {
2440 .is_lite = false,
2441 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2442 },
2443 [SYSTEMPORT_LITE] = {
2444 .is_lite = true,
2445 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2446 },
2447};
2448
2449static const struct of_device_id bcm_sysport_of_match[] = {
2450 { .compatible = "brcm,systemportlite-v1.00",
2451 .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2452 { .compatible = "brcm,systemport-v1.00",
2453 .data = &bcm_sysport_params[SYSTEMPORT] },
2454 { .compatible = "brcm,systemport",
2455 .data = &bcm_sysport_params[SYSTEMPORT] },
2456 { /* sentinel */ }
2457};
2458MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2459
Florian Fainelli80105be2014-04-24 18:08:57 -07002460static int bcm_sysport_probe(struct platform_device *pdev)
2461{
Florian Fainelli44a45242017-01-20 11:08:27 -08002462 const struct bcm_sysport_hw_params *params;
2463 const struct of_device_id *of_id = NULL;
Florian Fainelli80105be2014-04-24 18:08:57 -07002464 struct bcm_sysport_priv *priv;
2465 struct device_node *dn;
2466 struct net_device *dev;
Florian Fainelli80105be2014-04-24 18:08:57 -07002467 u32 txq, rxq;
2468 int ret;
2469
2470 dn = pdev->dev.of_node;
Florian Fainelli44a45242017-01-20 11:08:27 -08002471 of_id = of_match_node(bcm_sysport_of_match, dn);
2472 if (!of_id || !of_id->data)
2473 return -EINVAL;
2474
Florian Fainellid63b5422019-12-17 16:29:50 -08002475 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
2476 if (ret)
2477 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2478 if (ret) {
2479 dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
2480 return ret;
2481 }
2482
Florian Fainelli44a45242017-01-20 11:08:27 -08002483	/* We need to know the type of adapter we have early on */
2484 params = of_id->data;
Florian Fainelli80105be2014-04-24 18:08:57 -07002485
2486 /* Read the Transmit/Receive Queue properties */
2487 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2488 txq = TDMA_NUM_RINGS;
2489 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2490 rxq = 1;
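	/* A hypothetical device tree node exercising these properties might
	 * look like this (the unit address and queue counts are illustrative
	 * assumptions, not taken from a real board file):
	 *
	 *	ethernet@f04a0000 {
	 *		compatible = "brcm,systemport";
	 *		systemport,num-txq = <16>;
	 *		systemport,num-rxq = <1>;
	 *	};
	 */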
2491
Florian Fainelli7b78be42017-01-20 11:08:26 -08002492 /* Sanity check the number of transmit queues */
2493 if (!txq || txq > TDMA_NUM_RINGS)
2494 return -EINVAL;
2495
Florian Fainelli80105be2014-04-24 18:08:57 -07002496 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2497 if (!dev)
2498 return -ENOMEM;
2499
2500 /* Initialize private members */
2501 priv = netdev_priv(dev);
2502
Florian Fainelli31bc72d2020-09-01 14:43:47 -07002503 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
Pan Bian0c630a62021-01-19 20:44:23 -08002504 if (IS_ERR(priv->clk)) {
2505 ret = PTR_ERR(priv->clk);
2506 goto err_free_netdev;
2507 }
Florian Fainelli31bc72d2020-09-01 14:43:47 -07002508
Florian Fainelli7b78be42017-01-20 11:08:26 -08002509 /* Allocate number of TX rings */
2510 priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2511 sizeof(struct bcm_sysport_tx_ring),
2512 GFP_KERNEL);
Dinghao Liu7ef1fc52020-08-24 13:58:31 +08002513 if (!priv->tx_rings) {
2514 ret = -ENOMEM;
2515 goto err_free_netdev;
2516 }
Florian Fainelli7b78be42017-01-20 11:08:26 -08002517
Florian Fainelli44a45242017-01-20 11:08:27 -08002518 priv->is_lite = params->is_lite;
2519 priv->num_rx_desc_words = params->num_rx_desc_words;
2520
Florian Fainelli80105be2014-04-24 18:08:57 -07002521 priv->irq0 = platform_get_irq(pdev, 0);
Florian Fainellid31353c2017-06-01 18:02:39 -07002522 if (!priv->is_lite) {
Florian Fainelli44a45242017-01-20 11:08:27 -08002523 priv->irq1 = platform_get_irq(pdev, 1);
Florian Fainellid31353c2017-06-01 18:02:39 -07002524 priv->wol_irq = platform_get_irq(pdev, 2);
2525 } else {
2526 priv->wol_irq = platform_get_irq(pdev, 1);
2527 }
Florian Fainelli44a45242017-01-20 11:08:27 -08002528 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
Florian Fainelli80105be2014-04-24 18:08:57 -07002529 ret = -EINVAL;
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002530 goto err_free_netdev;
Florian Fainelli80105be2014-04-24 18:08:57 -07002531 }
2532
YueHaibing913919e2019-08-21 21:46:13 +08002533 priv->base = devm_platform_ioremap_resource(pdev, 0);
Jingoo Han126e6122014-05-14 12:15:42 +09002534 if (IS_ERR(priv->base)) {
2535 ret = PTR_ERR(priv->base);
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002536 goto err_free_netdev;
Florian Fainelli80105be2014-04-24 18:08:57 -07002537 }
2538
2539 priv->netdev = dev;
2540 priv->pdev = pdev;
2541
Andrew Lunn0c65b2b2019-11-04 02:40:33 +01002542 ret = of_get_phy_mode(dn, &priv->phy_interface);
Florian Fainelli80105be2014-04-24 18:08:57 -07002543 /* Default to GMII interface mode */
Andrew Lunn0c65b2b2019-11-04 02:40:33 +01002544 if (ret)
Florian Fainelli80105be2014-04-24 18:08:57 -07002545 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2546
Florian Fainelli186534a2014-05-22 09:47:46 -07002547 /* In the case of a fixed PHY, the DT node associated
2548 * to the PHY is the Ethernet MAC DT node.
2549 */
2550 if (of_phy_is_fixed_link(dn)) {
2551 ret = of_phy_register_fixed_link(dn);
2552 if (ret) {
2553 dev_err(&pdev->dev, "failed to register fixed PHY\n");
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002554 goto err_free_netdev;
Florian Fainelli186534a2014-05-22 09:47:46 -07002555 }
2556
2557 priv->phy_dn = dn;
2558 }
2559
Florian Fainelli80105be2014-04-24 18:08:57 -07002560 /* Initialize netdevice members */
Jakub Kicinski9ca01b22021-10-06 18:06:56 -07002561 ret = of_get_ethdev_address(dn, dev);
Michael Walle83216e32021-04-12 19:47:17 +02002562 if (ret) {
Florian Fainelli80105be2014-04-24 18:08:57 -07002563 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
Vaishali Thakkaradb35052015-07-08 10:49:30 +05302564 eth_hw_addr_random(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002565 }
2566
2567 SET_NETDEV_DEV(dev, &pdev->dev);
2568 dev_set_drvdata(&pdev->dev, dev);
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002569 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
Florian Fainelli80105be2014-04-24 18:08:57 -07002570 dev->netdev_ops = &bcm_sysport_netdev_ops;
2571 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2572
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			 NETIF_F_HW_VLAN_CTAG_TX;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->max_mtu = UMAC_MAX_MTU_SIZE;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk)) {
		/* Unwind any fixed link registered above and free the
		 * netdev rather than returning directly, which would
		 * leak both.
		 */
		ret = PTR_ERR(priv->wol_clk);
		goto err_deregister_fixed_link;
	}
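	/* Illustrative only: a device tree node consuming the optional
	 * Wake-on-LAN clock looked up above might be wired as below. The
	 * compatible string, unit address, clock phandles and the
	 * "sw_sysport" name for the main clock are assumptions for this
	 * sketch, not taken from this file.
	 *
	 *	ethernet@f04a0000 {
	 *		compatible = "brcm,systemport";
	 *		reg = <0xf04a0000 0x4650>;
	 *		clocks = <&sw_clks 0>, <&sw_clks 1>;
	 *		clock-names = "sw_sysport", "sw_sysportwol";
	 *	};
	 */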

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);
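	/* The TSB carries per-packet transmit checksum metadata; reserving
	 * its eight bytes of headroom up front spares every skb a
	 * reallocation on the hot transmit path.
	 */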

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

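	/* The netdevice notifier watches for DSA switch ports being
	 * attached on top of this MAC so that their queues can be mapped
	 * onto the port's TX rings (hence the error message below).
	 */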
	priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;

	ret = register_netdevice_notifier(&priv->netdev_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

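	/* The interface clock is only needed long enough to read the
	 * block revision below; the runtime paths (assumed here to be
	 * ndo_open/ndo_close) manage it on their own.
	 */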
	clk_prepare_enable(priv->clk);

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s " REV_FMT
		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	clk_disable_unprepare(priv->clk);

	return 0;

err_deregister_notifier:
	unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdevice_notifier(&priv->netdev_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
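	/* ethtool's SecureOn password is six bytes: the two most
	 * significant bytes land in UMAC_PSW_MS and the remaining four
	 * in UMAC_PSW_LS, both in big-endian order.
	 */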
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
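		/* Filter indices may be sparse, but the tag match enable
		 * bits are packed consecutively from bit 0, one bit per
		 * programmed filter.
		 */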
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure the RBUF entered WoL mode as a result; poll for up to
	 * ~10 ms (1000 iterations x 10 us)
	 */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (--timeout);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

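	/* Quiesce in reverse order of activation: detach and stop the
	 * network interface, suspend the PHY, then shut the receive side
	 * down (UniMAC RX, RDMA, RXCHK) before the transmit side (TDMA,
	 * UniMAC TX), flushing each pipe on the way.
	 */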
	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Prepare for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts) {
		clk_prepare_enable(priv->wol_clk);
		ret = bcm_sysport_suspend_to_wol(priv);
	}

	clk_disable_unprepare(priv->clk);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

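	/* Bring-up mirrors the open path: clocks and MAC reset first,
	 * then rings and DMA engines, the RX pipe ahead of the TX pipe,
	 * with the PHY and network stack last.
	 */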
	clk_prepare_enable(priv->clk);
	if (priv->wolopts)
		clk_disable_unprepare(priv->wol_clk);

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software rings */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	clk_disable_unprepare(priv->clk);
	return ret;
}

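/* SIMPLE_DEV_PM_OPS only wires the system suspend/resume callbacks up
 * when CONFIG_PM_SLEEP is enabled, which is why the handlers above are
 * annotated __maybe_unused instead of being wrapped in an #ifdef.
 */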
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver	= {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");