/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
#include <linux/of_platform.h>
#include <linux/fsl/ptp_qoriq.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	/* extra stats */
	"rx-allocation-errors",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"tx-timeout-errors",
	/* rmon stats */
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

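/* These names are reported verbatim by "ethtool -S <iface>"; e.g.
 * (illustrative invocation, interface name assumed):
 *
 *	ethtool -S eth0
 *
 * prints one "name: value" pair per entry, with the rmon block only
 * present on controllers that set FSL_GIANFAR_DEV_HAS_RMON.
 */
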
/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return DIV_ROUND_UP(usecs * 1000, count);
}

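/* Worked example (a sketch; the per-tick periods come from gianfar.h and
 * are assumed here to be GFAR_GBIT_TIME=512, GFAR_100_TIME=2560 and
 * GFAR_10_TIME=25600, in nanoseconds per tick):
 *
 *	gfar_usecs2ticks(priv, 30) at SPEED_1000
 *	  = DIV_ROUND_UP(30 * 1000, 512) = DIV_ROUND_UP(30000, 512) = 59
 *
 * so a 30 us coalescing window programs 59 timer ticks at gigabit speed.
 */
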
/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0 */
	return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	/* Check the bounds of the rx values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the tx values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

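/* The rx/tx halves are driven by the standard coalescing ioctl; e.g.
 * (illustrative command, interface name and values assumed):
 *
 *	ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 50 tx-frames 32
 *
 * Setting either member of a pair to 0 disables coalescing for that
 * direction, per the "have to be > 0" rule above.
 */
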
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

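/* Ring sizes are applied uniformly across all queues; e.g. (illustrative
 * command, interface name and sizes assumed):
 *
 *	ethtool -G eth0 rx 512 tx 512
 *
 * Requested sizes must be powers of 2 and no larger than
 * GFAR_RX_MAX_RING_SIZE/GFAR_TX_MAX_RING_SIZE, or -EINVAL is returned.
 */
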
static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (!phydev)
		return -ENODEV;

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause)
			priv->tx_pause_en = 1;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
	}

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	if (!epause->autoneg) {
		u32 tempval = gfar_read(&regs->maccfg1);

		tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

		priv->tx_actual_en = 0;
		if (priv->tx_pause_en) {
			priv->tx_actual_en = 1;
			tempval |= MACCFG1_TX_FLOW;
		}

		if (priv->rx_pause_en)
			tempval |= MACCFG1_RX_FLOW;
		gfar_write(&regs->maccfg1, tempval);
	}

	return 0;
}

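/* Flow control knobs map onto the standard pause ioctl; e.g.
 * (illustrative command, interface name assumed):
 *
 *	ethtool -A eth0 autoneg off rx on tx on
 *
 * With autoneg off the MACCFG1 flow-control bits are programmed
 * immediately; with autoneg on, the outcome of the next PHY
 * auto-negotiation decides what actually gets enabled.
 */
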
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (priv->wol_supported & GFAR_WOL_MAGIC)
		wol->supported |= WAKE_MAGIC;

	if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
		wol->supported |= WAKE_UCAST;

	if (priv->wol_opts & GFAR_WOL_MAGIC)
		wol->wolopts |= WAKE_MAGIC;

	if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
		wol->wolopts |= WAKE_UCAST;
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	u16 wol_opts = 0;
	int err;

	if (!priv->wol_supported && wol->wolopts)
		return -EINVAL;

	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		wol_opts |= GFAR_WOL_MAGIC;
	} else {
		if (wol->wolopts & WAKE_UCAST)
			wol_opts |= GFAR_WOL_FILER_UCAST;
	}

	wol_opts &= priv->wol_supported;
	priv->wol_opts = 0;

	err = device_set_wakeup_enable(priv->dev, wol_opts);
	if (err)
		return err;

	priv->wol_opts = wol_opts;

	return 0;
}
#endif

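/* Wake-on-LAN selection follows the usual ethtool convention; e.g.
 * (illustrative commands, interface name assumed):
 *
 *	ethtool -s eth0 wol g	# wake on magic packet (WAKE_MAGIC)
 *	ethtool -s eth0 wol u	# wake on unicast (WAKE_UCAST)
 *
 * Note the else-branch above: magic-packet wake takes precedence, so
 * the two options are not combined.
 */
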
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

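/* Each RXH_* bit above corresponds to a field key of the rx-flow-hash
 * interface; e.g. (illustrative command, interface name assumed):
 *
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * requests hashing on src-ip (s), dst-ip (d), src-port (f) and
 * dst-port (n), i.e. RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3, which this function turns into one filer hash rule per
 * selected field.
 */
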
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, it marks the beginning of a cluster rule;
	 * if one was already programmed, we need to overwrite these rules
	 */
	for (l = i + 1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j + 1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple
 * For a don't-care mask it gives us a 0
 *
 * The don't-care check and the mask adjustment for mask=0 are done for
 * VLAN and MAC stuff on an upper level (due to missing information on
 * this level). For those we can discard entries that are value=0 and
 * mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8 |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8 |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8 |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8 |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		    is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8 |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8 |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8 |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8 |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

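/* These helpers decompose the 16-bit 802.1Q TCI using the masks from
 * <linux/if_vlan.h>: PCP in bits 15-13, CFI/DEI in bit 12, VID in bits
 * 11-0. A quick worked example: a TCI of 0x6005 yields prio = 3,
 * cfi = 0, vid = 5.
 */
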
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi_mask) {
			if (cfi)
				vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

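/* A rule arrives here from the ethtool flow classification interface;
 * e.g. (illustrative command, interface name and values assumed):
 *
 *	ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 0
 *
 * becomes a chain of AND-ed filer entries (parse bits, then each masked
 * field), with the final entry carrying the target queue in bits 10+ of
 * the control word, or RQFCR_RJE when the action is a drop.
 */
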
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	return 0;
}

1242static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001243 struct gfar_private *priv)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001244{
1245
1246 if (flow->flow_type & FLOW_EXT) {
1247 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1248 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001249 "User-specific data not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001250 if (~flow->m_ext.vlan_etype)
1251 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001252 "VLAN-etype not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001253 }
1254 if (flow->flow_type == IP_USER_FLOW)
1255 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1256 netdev_warn(priv->ndev,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001257 "IP-Version differing from IPv4 not supported!\n");
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001258
1259 return 0;
1260}
1261
static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 ret = 0;

	/* kzalloc, so tab->index starts out as zero, too */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

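/* The ethtool flow-spec API and the filer code use opposite mask
 * conventions, so every mask bit is flipped on the way in and, since
 * the XOR transform is its own inverse, again on the way back out
 * (see gfar_get_cls()).
 */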
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

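/* Insert a new rule into the software list, which is kept sorted by
 * ascending rule location, and rebuild the hardware table.  On failure
 * the list is rolled back so software and hardware state stay in sync.
 */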
static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link the new element in at the right location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	priv->rx_list.count++;
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	return ret;

clean_list:
	priv->rx_list.count--;
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

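/* Remove the rule stored at @loc, if any, and rebuild the hardware
 * table.  Returns -EINVAL when no rule with that location exists.
 */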
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

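/* Look up a single rule by its location and copy it back to ethtool,
 * undoing the mask inversion applied when the rule was stored.
 */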
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

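/* Fill @rule_locs with the locations of all stored rules; the caller
 * sizes the array via cmd->rule_cnt, so running out of room reports
 * -EMSGSIZE.
 */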
static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

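/* Entry point for ethtool's set_rxnfc hook: hash options plus insertion
 * and deletion of classification rules.  As an illustration (device name
 * and values chosen arbitrarily), a command such as
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 0
 *
 * arrives here as an ETHTOOL_SRXCLSRLINS request asking to steer
 * matching packets to RX queue 1.
 */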
static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return -EBUSY;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}

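/* Entry point for ethtool's get_rxnfc hook; backs `ethtool -n <dev>`
 * queries for the RX ring count and the stored classification rules.
 */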
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

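/* Report timestamping capabilities for `ethtool -T <dev>`.  When the
 * eTSEC timer block is present, the PHC index is taken from the QorIQ
 * PTP driver bound to the "fsl,etsec-ptp" device-tree node.
 */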
static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct platform_device *ptp_dev;
	struct device_node *ptp_node;
	struct qoriq_ptp *ptp = NULL;

	info->phc_index = -1;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	}

	ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		/* drop the reference taken by of_find_compatible_node() */
		of_node_put(ptp_node);
		if (ptp_dev)
			ptp = platform_get_drvdata(ptp_dev);
	}

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

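/* The ethtool_ops vector exported to the core driver; link settings
 * are delegated to the generic phylib helpers.
 */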
const struct ethtool_ops gfar_ethtool_ops = {
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};