// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 */

#include <linux/net_tstamp.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
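
/* The strings above must match, in order, the counters returned by
 * dpni_get_statistics() for pages 0-2, which dpaa2_eth_get_ethtool_stats()
 * below copies out verbatim.
 */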

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx realloc frames",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpni_link_state state = {0};
	int err = 0;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (err) {
		netdev_err(net_dev, "ERROR %d getting link state\n", err);
		goto out;
	}

	/* At the moment, we have no way of interrogating the DPMAC
	 * from the DPNI side - and for that matter there may exist
	 * no DPMAC at all. So for now we just don't report anything
	 * beyond the DPNI attributes.
	 */
	if (state.options & DPNI_LINK_OPT_AUTONEG)
		link_settings->base.autoneg = AUTONEG_ENABLE;
	if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = state.rate;

out:
	return err;
}

#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR		7
#define DPNI_DYNAMIC_LINK_SET_VER_MINOR		1
static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpni_link_cfg cfg = {0};
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	/* If using an older MC version, the DPNI must be down
	 * in order to be able to change link settings. Let the
	 * user know about it.
	 */
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
				   DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
		if (netif_running(net_dev)) {
			netdev_info(net_dev, "Interface must be brought down first.\n");
			return -EACCES;
		}
	}

	cfg.rate = link_settings->base.speed;
	if (link_settings->base.autoneg == AUTONEG_ENABLE)
		cfg.options |= DPNI_LINK_OPT_AUTONEG;
	else
		cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
	if (link_settings->base.duplex == DUPLEX_HALF)
		cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
	else
		cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err)
		/* ethtool will be loud enough if we return an error; no point
		 * in putting our own error message on the console by default
		 */
		netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);

	return err;
}
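
/* Illustrative usage (not part of this file): with a new enough MC firmware,
 * link parameters can be changed at runtime with standard ethtool, e.g.
 *
 *	ethtool -s eth0 speed 1000 duplex full autoneg off
 *
 * On older MC versions the interface must be down first, as enforced above.
 */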

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill in hardware counters, as returned by MC */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 2; j++) {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err != 0)
			netdev_warn(net_dev, "dpni_get_statistics(%d) failed\n", j);
		switch (j) {
		case 0:
			num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
			break;
		case 1:
			num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
			break;
		case 2:
			num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
			break;
		}
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

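	/* Both dpaa2_eth_drv_stats and dpaa2_eth_ch_stats consist solely of
	 * u64 counters, so the accumulation below can safely walk them as
	 * flat arrays of __u64.
	 */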
	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d\n", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;
}

static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
			 struct ethtool_usrip4_spec *uip_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

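	/* l4_4_bytes carries the first four bytes of the L4 header in
	 * network order: source port in the upper 16 bits, destination
	 * port in the lower 16.
	 */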
	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
			struct ethtool_tcpip4_spec *l4_mask,
			void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}

static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
			 struct ethtool_flow_ext *ext_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
			     struct ethtool_flow_ext *ext_mask,
			     void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
			 u64 *fields)
{
	int err;
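
	/* The low byte of flow_type holds the base flow type; the upper bits
	 * are flag bits such as FLOW_EXT and FLOW_MAC_EXT, which are handled
	 * separately below.
	 */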
	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
				    key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
				    &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
				   key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
				   key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
				   &fs->m_u.sctp_ip4_spec, key, mask,
				   IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
					fields);
		if (err)
			return err;
	}

	return 0;
}

static int do_cls_rule(struct net_device *net_dev,
		       struct ethtool_rx_flow_spec *fs,
		       bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

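	/* Key and mask live back to back in the same buffer:
	 *
	 *	key_buf:  [ key (key_size) | mask (key_size) ]
	 *
	 * so a single allocation and a single DMA mapping cover both.
	 */
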
	/* Fill the key and mask memory areas */
	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					fs->location, &rule_cfg, &fs_act);
	} else {
		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					   &rule_cfg);
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

static int update_cls_rule(struct net_device *net_dev,
			   struct ethtool_rx_flow_spec *new_fs,
			   int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore rxnfc->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = num_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

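/* Illustrative usage (not part of this file): the rxnfc entry points above
 * back standard ethtool n-tuple commands, e.g.
 *
 *	ethtool -N eth0 flow-type udp4 dst-port 4000 action 2 loc 1
 *	ethtool -N eth0 delete 1
 *
 * where "action" selects the Rx queue (fs->ring_cookie) and "loc" the rule
 * slot (fs->location).
 */
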
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

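/* Illustrative usage (not part of this file): the capabilities reported by
 * dpaa2_eth_get_ts_info() can be inspected with "ethtool -T eth0"; hardware
 * timestamping itself is enabled via the SIOCSHWTSTAMP ioctl, e.g. by ptp4l
 * or linuxptp's hwstamp_ctl (hwstamp_ctl -i eth0 -t 1 -r 1).
 */
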
const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
};