1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
4  * stmmac Selftests Support
5  *
6  * Author: Jose Abreu <joabreu@synopsys.com>
7  */
8 
9 #include <linux/bitrev.h>
10 #include <linux/completion.h>
11 #include <linux/crc32.h>
12 #include <linux/ethtool.h>
13 #include <linux/ip.h>
14 #include <linux/phy.h>
15 #include <linux/udp.h>
16 #include <net/pkt_cls.h>
17 #include <net/tcp.h>
18 #include <net/udp.h>
19 #include <net/tc_act/tc_gact.h>
20 #include "stmmac.h"
21 
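/* Private test header placed after the IP/L4 headers of every generated
 * packet; the magic value and per-packet id let the RX handlers recognize
 * their own frames.
 */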
22 struct stmmachdr {
23 	__be32 version;
24 	__be64 magic;
25 	u8 id;
26 } __packed;
27 
28 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
29 			      sizeof(struct stmmachdr))
30 #define STMMAC_TEST_PKT_MAGIC	0xdeadcafecafedeadULL
31 #define STMMAC_LB_TIMEOUT	msecs_to_jiffies(200)
32 
33 struct stmmac_packet_attrs {
34 	int vlan;
35 	int vlan_id_in;
36 	int vlan_id_out;
37 	unsigned char *src;
38 	unsigned char *dst;
39 	u32 ip_src;
40 	u32 ip_dst;
41 	int tcp;
42 	int sport;
43 	int dport;
44 	u32 exp_hash;
45 	int dont_wait;
46 	int timeout;
47 	int size;
48 	int max_size;
49 	int remove_sa;
50 	u8 id;
51 	int sarc;
52 	u16 queue_mapping;
53 };
54 
55 static u8 stmmac_test_next_id;
56 
57 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
58 					       struct stmmac_packet_attrs *attr)
59 {
60 	struct sk_buff *skb = NULL;
61 	struct udphdr *uhdr = NULL;
62 	struct tcphdr *thdr = NULL;
63 	struct stmmachdr *shdr;
64 	struct ethhdr *ehdr;
65 	struct iphdr *ihdr;
66 	int iplen, size;
67 
68 	size = attr->size + STMMAC_TEST_PKT_SIZE;
69 	if (attr->vlan) {
70 		size += 4;
71 		if (attr->vlan > 1)
72 			size += 4;
73 	}
74 
75 	if (attr->tcp)
76 		size += sizeof(struct tcphdr);
77 	else
78 		size += sizeof(struct udphdr);
79 
80 	if (attr->max_size && (attr->max_size > size))
81 		size = attr->max_size;
82 
83 	skb = netdev_alloc_skb_ip_align(priv->dev, size);
84 	if (!skb)
85 		return NULL;
86 
87 	prefetchw(skb->data);
88 
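	/* Push room for the L2 header: +4/+8 bytes for single/double VLAN
	 * tags, ETH_ALEN less when the source address is left out.
	 */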
89 	if (attr->vlan > 1)
90 		ehdr = skb_push(skb, ETH_HLEN + 8);
91 	else if (attr->vlan)
92 		ehdr = skb_push(skb, ETH_HLEN + 4);
93 	else if (attr->remove_sa)
94 		ehdr = skb_push(skb, ETH_HLEN - 6);
95 	else
96 		ehdr = skb_push(skb, ETH_HLEN);
97 	skb_reset_mac_header(skb);
98 
99 	skb_set_network_header(skb, skb->len);
100 	ihdr = skb_put(skb, sizeof(*ihdr));
101 
102 	skb_set_transport_header(skb, skb->len);
103 	if (attr->tcp)
104 		thdr = skb_put(skb, sizeof(*thdr));
105 	else
106 		uhdr = skb_put(skb, sizeof(*uhdr));
107 
108 	if (!attr->remove_sa)
109 		eth_zero_addr(ehdr->h_source);
110 	eth_zero_addr(ehdr->h_dest);
111 	if (attr->src && !attr->remove_sa)
112 		ether_addr_copy(ehdr->h_source, attr->src);
113 	if (attr->dst)
114 		ether_addr_copy(ehdr->h_dest, attr->dst);
115 
116 	if (!attr->remove_sa) {
117 		ehdr->h_proto = htons(ETH_P_IP);
118 	} else {
119 		__be16 *ptr = (__be16 *)ehdr;
120 
		/* The SA was left out, so the EtherType follows the 6-byte
		 * DA (16-bit word 3) and cannot be set via ehdr->h_proto.
		 */
122 		ptr[3] = htons(ETH_P_IP);
123 	}
124 
125 	if (attr->vlan) {
126 		__be16 *tag, *proto;
127 
128 		if (!attr->remove_sa) {
129 			tag = (void *)ehdr + ETH_HLEN;
130 			proto = (void *)ehdr + (2 * ETH_ALEN);
131 		} else {
132 			tag = (void *)ehdr + ETH_HLEN - 6;
133 			proto = (void *)ehdr + ETH_ALEN;
134 		}
135 
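		/* Write the outer tag; for double VLAN switch the outer
		 * TPID to 802.1ad and append the inner 802.1Q tag.
		 */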
136 		proto[0] = htons(ETH_P_8021Q);
137 		tag[0] = htons(attr->vlan_id_out);
138 		tag[1] = htons(ETH_P_IP);
139 		if (attr->vlan > 1) {
140 			proto[0] = htons(ETH_P_8021AD);
141 			tag[1] = htons(ETH_P_8021Q);
142 			tag[2] = htons(attr->vlan_id_in);
143 			tag[3] = htons(ETH_P_IP);
144 		}
145 	}
146 
147 	if (attr->tcp) {
148 		thdr->source = htons(attr->sport);
149 		thdr->dest = htons(attr->dport);
150 		thdr->doff = sizeof(struct tcphdr) / 4;
151 		thdr->check = 0;
152 	} else {
153 		uhdr->source = htons(attr->sport);
154 		uhdr->dest = htons(attr->dport);
155 		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
156 		if (attr->max_size)
157 			uhdr->len = htons(attr->max_size -
158 					  (sizeof(*ihdr) + sizeof(*ehdr)));
159 		uhdr->check = 0;
160 	}
161 
162 	ihdr->ihl = 5;
163 	ihdr->ttl = 32;
164 	ihdr->version = 4;
165 	if (attr->tcp)
166 		ihdr->protocol = IPPROTO_TCP;
167 	else
168 		ihdr->protocol = IPPROTO_UDP;
169 	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
170 	if (attr->tcp)
171 		iplen += sizeof(*thdr);
172 	else
173 		iplen += sizeof(*uhdr);
174 
175 	if (attr->max_size)
176 		iplen = attr->max_size - sizeof(*ehdr);
177 
178 	ihdr->tot_len = htons(iplen);
179 	ihdr->frag_off = 0;
180 	ihdr->saddr = htonl(attr->ip_src);
181 	ihdr->daddr = htonl(attr->ip_dst);
182 	ihdr->tos = 0;
183 	ihdr->id = 0;
184 	ip_send_check(ihdr);
185 
186 	shdr = skb_put(skb, sizeof(*shdr));
187 	shdr->version = 0;
188 	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
189 	attr->id = stmmac_test_next_id;
190 	shdr->id = stmmac_test_next_id++;
191 
192 	if (attr->size)
193 		skb_put(skb, attr->size);
194 	if (attr->max_size && (attr->max_size > skb->len))
195 		skb_put(skb, attr->max_size - skb->len);
196 
197 	skb->csum = 0;
198 	skb->ip_summed = CHECKSUM_PARTIAL;
199 	if (attr->tcp) {
200 		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
201 		skb->csum_start = skb_transport_header(skb) - skb->head;
202 		skb->csum_offset = offsetof(struct tcphdr, check);
203 	} else {
204 		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
205 	}
206 
207 	skb->protocol = htons(ETH_P_IP);
208 	skb->pkt_type = PACKET_HOST;
209 	skb->dev = priv->dev;
210 
211 	return skb;
212 }
213 
214 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
215 					       struct stmmac_packet_attrs *attr)
216 {
217 	__be32 ip_src = htonl(attr->ip_src);
218 	__be32 ip_dst = htonl(attr->ip_dst);
219 	struct sk_buff *skb = NULL;
220 
221 	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
222 			 NULL, attr->src, attr->dst);
223 	if (!skb)
224 		return NULL;
225 
226 	skb->pkt_type = PACKET_HOST;
227 	skb->dev = priv->dev;
228 
229 	return skb;
230 }
231 
232 struct stmmac_test_priv {
233 	struct stmmac_packet_attrs *packet;
234 	struct packet_type pt;
235 	struct completion comp;
236 	int double_vlan;
237 	int vlan_id;
238 	int ok;
239 };
240 
241 static int stmmac_test_loopback_validate(struct sk_buff *skb,
242 					 struct net_device *ndev,
243 					 struct packet_type *pt,
244 					 struct net_device *orig_ndev)
245 {
246 	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
247 	struct stmmachdr *shdr;
248 	struct ethhdr *ehdr;
249 	struct udphdr *uhdr;
250 	struct tcphdr *thdr;
251 	struct iphdr *ihdr;
252 
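	/* Work on a private, linearized copy of the looped-back frame */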
253 	skb = skb_unshare(skb, GFP_ATOMIC);
254 	if (!skb)
255 		goto out;
256 
257 	if (skb_linearize(skb))
258 		goto out;
259 	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
260 		goto out;
261 
262 	ehdr = (struct ethhdr *)skb_mac_header(skb);
263 	if (tpriv->packet->dst) {
264 		if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
265 			goto out;
266 	}
267 	if (tpriv->packet->sarc) {
268 		if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
269 			goto out;
270 	} else if (tpriv->packet->src) {
271 		if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
272 			goto out;
273 	}
274 
275 	ihdr = ip_hdr(skb);
276 	if (tpriv->double_vlan)
277 		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
278 
279 	if (tpriv->packet->tcp) {
280 		if (ihdr->protocol != IPPROTO_TCP)
281 			goto out;
282 
283 		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
284 		if (thdr->dest != htons(tpriv->packet->dport))
285 			goto out;
286 
287 		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
288 	} else {
289 		if (ihdr->protocol != IPPROTO_UDP)
290 			goto out;
291 
292 		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
293 		if (uhdr->dest != htons(tpriv->packet->dport))
294 			goto out;
295 
296 		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
297 	}
298 
299 	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
300 		goto out;
301 	if (tpriv->packet->exp_hash && !skb->hash)
302 		goto out;
303 	if (tpriv->packet->id != shdr->id)
304 		goto out;
305 
306 	tpriv->ok = true;
307 	complete(&tpriv->comp);
308 out:
309 	kfree_skb(skb);
310 	return 0;
311 }
312 
313 static int __stmmac_test_loopback(struct stmmac_priv *priv,
314 				  struct stmmac_packet_attrs *attr)
315 {
316 	struct stmmac_test_priv *tpriv;
317 	struct sk_buff *skb = NULL;
318 	int ret = 0;
319 
320 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
321 	if (!tpriv)
322 		return -ENOMEM;
323 
324 	tpriv->ok = false;
325 	init_completion(&tpriv->comp);
326 
327 	tpriv->pt.type = htons(ETH_P_IP);
328 	tpriv->pt.func = stmmac_test_loopback_validate;
329 	tpriv->pt.dev = priv->dev;
330 	tpriv->pt.af_packet_priv = tpriv;
331 	tpriv->packet = attr;
332 
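	/* Only register the RX handler when we are going to wait for the
	 * frame to come back.
	 */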
333 	if (!attr->dont_wait)
334 		dev_add_pack(&tpriv->pt);
335 
336 	skb = stmmac_test_get_udp_skb(priv, attr);
337 	if (!skb) {
338 		ret = -ENOMEM;
339 		goto cleanup;
340 	}
341 
342 	skb_set_queue_mapping(skb, attr->queue_mapping);
343 	ret = dev_queue_xmit(skb);
344 	if (ret)
345 		goto cleanup;
346 
347 	if (attr->dont_wait)
348 		goto cleanup;
349 
350 	if (!attr->timeout)
351 		attr->timeout = STMMAC_LB_TIMEOUT;
352 
353 	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
354 	ret = tpriv->ok ? 0 : -ETIMEDOUT;
355 
356 cleanup:
357 	if (!attr->dont_wait)
358 		dev_remove_pack(&tpriv->pt);
359 	kfree(tpriv);
360 	return ret;
361 }
362 
363 static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
364 {
365 	struct stmmac_packet_attrs attr = { };
366 
367 	attr.dst = priv->dev->dev_addr;
368 	return __stmmac_test_loopback(priv, &attr);
369 }
370 
371 static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
372 {
373 	struct stmmac_packet_attrs attr = { };
374 	int ret;
375 
376 	if (!priv->dev->phydev)
377 		return -EBUSY;
378 
379 	ret = phy_loopback(priv->dev->phydev, true);
380 	if (ret)
381 		return ret;
382 
383 	attr.dst = priv->dev->dev_addr;
384 	ret = __stmmac_test_loopback(priv, &attr);
385 
386 	phy_loopback(priv->dev->phydev, false);
387 	return ret;
388 }
389 
390 static int stmmac_test_mmc(struct stmmac_priv *priv)
391 {
392 	struct stmmac_counters initial, final;
393 	int ret;
394 
395 	memset(&initial, 0, sizeof(initial));
396 	memset(&final, 0, sizeof(final));
397 
398 	if (!priv->dma_cap.rmon)
399 		return -EOPNOTSUPP;
400 
401 	/* Save previous results into internal struct */
402 	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
403 
404 	ret = stmmac_test_mac_loopback(priv);
405 	if (ret)
406 		return ret;
407 
	/* These are loopback results, so there is no need to save them */
409 	stmmac_mmc_read(priv, priv->mmcaddr, &final);
410 
	/*
	 * The number of MMC counters available depends on the HW
	 * configuration, so just use the TX good-frame counter, which
	 * should be present in every version, to validate the feature.
	 */
416 	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
417 		return -EINVAL;
418 
419 	return 0;
420 }
421 
422 static int stmmac_test_eee(struct stmmac_priv *priv)
423 {
424 	struct stmmac_extra_stats *initial, *final;
425 	int retries = 10;
426 	int ret;
427 
428 	if (!priv->dma_cap.eee || !priv->eee_active)
429 		return -EOPNOTSUPP;
430 
431 	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
432 	if (!initial)
433 		return -ENOMEM;
434 
435 	final = kzalloc(sizeof(*final), GFP_KERNEL);
436 	if (!final) {
437 		ret = -ENOMEM;
438 		goto out_free_initial;
439 	}
440 
441 	memcpy(initial, &priv->xstats, sizeof(*initial));
442 
443 	ret = stmmac_test_mac_loopback(priv);
444 	if (ret)
445 		goto out_free_final;
446 
	/* There is no traffic on the line, so sooner or later the TX path
	 * will enter LPI.
	 */
448 	while (--retries) {
449 		memcpy(final, &priv->xstats, sizeof(*final));
450 
451 		if (final->irq_tx_path_in_lpi_mode_n >
452 		    initial->irq_tx_path_in_lpi_mode_n)
453 			break;
454 		msleep(100);
455 	}
456 
457 	if (!retries) {
458 		ret = -ETIMEDOUT;
459 		goto out_free_final;
460 	}
461 
462 	if (final->irq_tx_path_in_lpi_mode_n <=
463 	    initial->irq_tx_path_in_lpi_mode_n) {
464 		ret = -EINVAL;
465 		goto out_free_final;
466 	}
467 
468 	if (final->irq_tx_path_exit_lpi_mode_n <=
469 	    initial->irq_tx_path_exit_lpi_mode_n) {
470 		ret = -EINVAL;
471 		goto out_free_final;
472 	}
473 
474 out_free_final:
475 	kfree(final);
476 out_free_initial:
477 	kfree(initial);
478 	return ret;
479 }
480 
481 static int stmmac_filter_check(struct stmmac_priv *priv)
482 {
483 	if (!(priv->dev->flags & IFF_PROMISC))
484 		return 0;
485 
486 	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
487 	return -EOPNOTSUPP;
488 }
489 
490 static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
491 {
492 	int mc_offset = 32 - priv->hw->mcast_bits_log2;
493 	struct netdev_hw_addr *ha;
494 	u32 hash, hash_nr;
495 
	/* First compute the hash for the desired addr */
	hash = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> mc_offset;
498 	hash_nr = hash >> 5;
499 	hash = 1 << (hash & 0x1f);
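	/* hash_nr is the word in the hash table, hash the bit inside it */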
500 
501 	/* Now, check if it collides with any existing one */
502 	netdev_for_each_mc_addr(ha, priv->dev) {
503 		u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
504 		if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
505 			return false;
506 	}
507 
508 	/* No collisions, address is good to go */
509 	return true;
510 }
511 
512 static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
513 {
514 	struct netdev_hw_addr *ha;
515 
516 	/* Check if it collides with any existing one */
517 	netdev_for_each_uc_addr(ha, priv->dev) {
518 		if (!memcmp(ha->addr, addr, ETH_ALEN))
519 			return false;
520 	}
521 
522 	/* No collisions, address is good to go */
523 	return true;
524 }
525 
526 static int stmmac_test_hfilt(struct stmmac_priv *priv)
527 {
528 	unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
529 	unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
530 	struct stmmac_packet_attrs attr = { };
531 	int ret, tries = 256;
532 
533 	ret = stmmac_filter_check(priv);
534 	if (ret)
535 		return ret;
536 
537 	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
538 		return -EOPNOTSUPP;
539 
540 	while (--tries) {
541 		/* We only need to check the bd_addr for collisions */
542 		bd_addr[ETH_ALEN - 1] = tries;
543 		if (stmmac_hash_check(priv, bd_addr))
544 			break;
545 	}
546 
547 	if (!tries)
548 		return -EOPNOTSUPP;
549 
550 	ret = dev_mc_add(priv->dev, gd_addr);
551 	if (ret)
552 		return ret;
553 
554 	attr.dst = gd_addr;
555 
556 	/* Shall receive packet */
557 	ret = __stmmac_test_loopback(priv, &attr);
558 	if (ret)
559 		goto cleanup;
560 
561 	attr.dst = bd_addr;
562 
563 	/* Shall NOT receive packet */
564 	ret = __stmmac_test_loopback(priv, &attr);
565 	ret = ret ? 0 : -EINVAL;
566 
567 cleanup:
568 	dev_mc_del(priv->dev, gd_addr);
569 	return ret;
570 }
571 
572 static int stmmac_test_pfilt(struct stmmac_priv *priv)
573 {
574 	unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
575 	unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
576 	struct stmmac_packet_attrs attr = { };
577 	int ret, tries = 256;
578 
579 	if (stmmac_filter_check(priv))
580 		return -EOPNOTSUPP;
581 	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
582 		return -EOPNOTSUPP;
583 
584 	while (--tries) {
585 		/* We only need to check the bd_addr for collisions */
586 		bd_addr[ETH_ALEN - 1] = tries;
587 		if (stmmac_perfect_check(priv, bd_addr))
588 			break;
589 	}
590 
591 	if (!tries)
592 		return -EOPNOTSUPP;
593 
594 	ret = dev_uc_add(priv->dev, gd_addr);
595 	if (ret)
596 		return ret;
597 
598 	attr.dst = gd_addr;
599 
600 	/* Shall receive packet */
601 	ret = __stmmac_test_loopback(priv, &attr);
602 	if (ret)
603 		goto cleanup;
604 
605 	attr.dst = bd_addr;
606 
607 	/* Shall NOT receive packet */
608 	ret = __stmmac_test_loopback(priv, &attr);
609 	ret = ret ? 0 : -EINVAL;
610 
611 cleanup:
612 	dev_uc_del(priv->dev, gd_addr);
613 	return ret;
614 }
615 
616 static int stmmac_test_mcfilt(struct stmmac_priv *priv)
617 {
618 	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
619 	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
620 	struct stmmac_packet_attrs attr = { };
621 	int ret, tries = 256;
622 
623 	if (stmmac_filter_check(priv))
624 		return -EOPNOTSUPP;
625 	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
626 		return -EOPNOTSUPP;
627 
628 	while (--tries) {
629 		/* We only need to check the mc_addr for collisions */
630 		mc_addr[ETH_ALEN - 1] = tries;
631 		if (stmmac_hash_check(priv, mc_addr))
632 			break;
633 	}
634 
635 	if (!tries)
636 		return -EOPNOTSUPP;
637 
638 	ret = dev_uc_add(priv->dev, uc_addr);
639 	if (ret)
640 		return ret;
641 
642 	attr.dst = uc_addr;
643 
644 	/* Shall receive packet */
645 	ret = __stmmac_test_loopback(priv, &attr);
646 	if (ret)
647 		goto cleanup;
648 
649 	attr.dst = mc_addr;
650 
651 	/* Shall NOT receive packet */
652 	ret = __stmmac_test_loopback(priv, &attr);
653 	ret = ret ? 0 : -EINVAL;
654 
655 cleanup:
656 	dev_uc_del(priv->dev, uc_addr);
657 	return ret;
658 }
659 
660 static int stmmac_test_ucfilt(struct stmmac_priv *priv)
661 {
662 	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
663 	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
664 	struct stmmac_packet_attrs attr = { };
665 	int ret, tries = 256;
666 
667 	if (stmmac_filter_check(priv))
668 		return -EOPNOTSUPP;
669 	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
670 		return -EOPNOTSUPP;
671 
672 	while (--tries) {
673 		/* We only need to check the uc_addr for collisions */
674 		uc_addr[ETH_ALEN - 1] = tries;
675 		if (stmmac_perfect_check(priv, uc_addr))
676 			break;
677 	}
678 
679 	if (!tries)
680 		return -EOPNOTSUPP;
681 
682 	ret = dev_mc_add(priv->dev, mc_addr);
683 	if (ret)
684 		return ret;
685 
686 	attr.dst = mc_addr;
687 
688 	/* Shall receive packet */
689 	ret = __stmmac_test_loopback(priv, &attr);
690 	if (ret)
691 		goto cleanup;
692 
693 	attr.dst = uc_addr;
694 
695 	/* Shall NOT receive packet */
696 	ret = __stmmac_test_loopback(priv, &attr);
697 	ret = ret ? 0 : -EINVAL;
698 
699 cleanup:
700 	dev_mc_del(priv->dev, mc_addr);
701 	return ret;
702 }
703 
704 static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
705 					 struct net_device *ndev,
706 					 struct packet_type *pt,
707 					 struct net_device *orig_ndev)
708 {
709 	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
710 	struct ethhdr *ehdr;
711 
712 	ehdr = (struct ethhdr *)skb_mac_header(skb);
713 	if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
714 		goto out;
715 	if (ehdr->h_proto != htons(ETH_P_PAUSE))
716 		goto out;
717 
718 	tpriv->ok = true;
719 	complete(&tpriv->comp);
720 out:
721 	kfree_skb(skb);
722 	return 0;
723 }
724 
725 static int stmmac_test_flowctrl(struct stmmac_priv *priv)
726 {
727 	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
728 	struct phy_device *phydev = priv->dev->phydev;
729 	u32 rx_cnt = priv->plat->rx_queues_to_use;
730 	struct stmmac_test_priv *tpriv;
731 	unsigned int pkt_count;
732 	int i, ret = 0;
733 
734 	if (!phydev || (!phydev->pause && !phydev->asym_pause))
735 		return -EOPNOTSUPP;
736 
737 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
738 	if (!tpriv)
739 		return -ENOMEM;
740 
741 	tpriv->ok = false;
742 	init_completion(&tpriv->comp);
743 	tpriv->pt.type = htons(ETH_P_PAUSE);
744 	tpriv->pt.func = stmmac_test_flowctrl_validate;
745 	tpriv->pt.dev = priv->dev;
746 	tpriv->pt.af_packet_priv = tpriv;
747 	dev_add_pack(&tpriv->pt);
748 
	/* Compute the number of 1400-byte packets needed to fill the RX
	 * FIFO, then double it to make sure it really overflows.
	 */
750 	pkt_count = priv->plat->rx_fifo_size;
751 	if (!pkt_count)
752 		pkt_count = priv->dma_cap.rx_fifo_size;
753 	pkt_count /= 1400;
754 	pkt_count *= 2;
755 
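	/* Stop all RX DMA channels so incoming frames pile up in the RX
	 * FIFO and eventually trigger a PAUSE frame.
	 */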
756 	for (i = 0; i < rx_cnt; i++)
757 		stmmac_stop_rx(priv, priv->ioaddr, i);
758 
759 	ret = dev_set_promiscuity(priv->dev, 1);
760 	if (ret)
761 		goto cleanup;
762 
763 	ret = dev_mc_add(priv->dev, paddr);
764 	if (ret)
765 		goto cleanup;
766 
767 	for (i = 0; i < pkt_count; i++) {
768 		struct stmmac_packet_attrs attr = { };
769 
770 		attr.dst = priv->dev->dev_addr;
771 		attr.dont_wait = true;
772 		attr.size = 1400;
773 
774 		ret = __stmmac_test_loopback(priv, &attr);
775 		if (ret)
776 			goto cleanup;
777 		if (tpriv->ok)
778 			break;
779 	}
780 
781 	/* Wait for some time in case RX Watchdog is enabled */
782 	msleep(200);
783 
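	/* Re-arm RX: move the tail pointer past the ring, restart the DMA
	 * channel and kick NAPI so the queued frames get processed again.
	 */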
784 	for (i = 0; i < rx_cnt; i++) {
785 		struct stmmac_channel *ch = &priv->channel[i];
786 		u32 tail;
787 
788 		tail = priv->rx_queue[i].dma_rx_phy +
789 			(DMA_RX_SIZE * sizeof(struct dma_desc));
790 
791 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
792 		stmmac_start_rx(priv, priv->ioaddr, i);
793 
794 		local_bh_disable();
795 		napi_reschedule(&ch->rx_napi);
796 		local_bh_enable();
797 	}
798 
799 	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
800 	ret = tpriv->ok ? 0 : -ETIMEDOUT;
801 
802 cleanup:
803 	dev_mc_del(priv->dev, paddr);
804 	dev_set_promiscuity(priv->dev, -1);
805 	dev_remove_pack(&tpriv->pt);
806 	kfree(tpriv);
807 	return ret;
808 }
809 
810 static int stmmac_test_rss(struct stmmac_priv *priv)
811 {
812 	struct stmmac_packet_attrs attr = { };
813 
814 	if (!priv->dma_cap.rssen || !priv->rss.enable)
815 		return -EOPNOTSUPP;
816 
817 	attr.dst = priv->dev->dev_addr;
818 	attr.exp_hash = true;
819 	attr.sport = 0x321;
820 	attr.dport = 0x123;
821 
822 	return __stmmac_test_loopback(priv, &attr);
823 }
824 
825 static int stmmac_test_vlan_validate(struct sk_buff *skb,
826 				     struct net_device *ndev,
827 				     struct packet_type *pt,
828 				     struct net_device *orig_ndev)
829 {
830 	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
831 	struct stmmachdr *shdr;
832 	struct ethhdr *ehdr;
833 	struct udphdr *uhdr;
834 	struct iphdr *ihdr;
835 	u16 proto;
836 
837 	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
838 
839 	skb = skb_unshare(skb, GFP_ATOMIC);
840 	if (!skb)
841 		goto out;
842 
843 	if (skb_linearize(skb))
844 		goto out;
845 	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
846 		goto out;
847 	if (tpriv->vlan_id) {
848 		if (skb->vlan_proto != htons(proto))
849 			goto out;
850 		if (skb->vlan_tci != tpriv->vlan_id)
851 			goto out;
852 	}
853 
854 	ehdr = (struct ethhdr *)skb_mac_header(skb);
855 	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
856 		goto out;
857 
858 	ihdr = ip_hdr(skb);
859 	if (tpriv->double_vlan)
860 		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
861 	if (ihdr->protocol != IPPROTO_UDP)
862 		goto out;
863 
864 	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
865 	if (uhdr->dest != htons(tpriv->packet->dport))
866 		goto out;
867 
868 	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
869 	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
870 		goto out;
871 
872 	tpriv->ok = true;
873 	complete(&tpriv->comp);
874 
875 out:
876 	kfree_skb(skb);
877 	return 0;
878 }
879 
880 static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
881 {
882 	struct stmmac_packet_attrs attr = { };
883 	struct stmmac_test_priv *tpriv;
884 	struct sk_buff *skb = NULL;
885 	int ret = 0, i;
886 
887 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
888 	if (!tpriv)
889 		return -ENOMEM;
890 
891 	tpriv->ok = false;
892 	init_completion(&tpriv->comp);
893 
894 	tpriv->pt.type = htons(ETH_P_IP);
895 	tpriv->pt.func = stmmac_test_vlan_validate;
896 	tpriv->pt.dev = priv->dev;
897 	tpriv->pt.af_packet_priv = tpriv;
898 	tpriv->packet = &attr;
899 
	/*
	 * As we use HASH filtering, false positives may appear. The ID is
	 * specially chosen so that the nearby IDs used below (+1 to +3)
	 * map to different HASH values.
	 */
905 	tpriv->vlan_id = 0x123;
906 	dev_add_pack(&tpriv->pt);
907 
908 	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
909 	if (ret)
910 		goto cleanup;
911 
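	/* Only the first iteration uses the VID programmed in the filter;
	 * frames with VID +1..+3 must be dropped.
	 */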
912 	for (i = 0; i < 4; i++) {
913 		attr.vlan = 1;
914 		attr.vlan_id_out = tpriv->vlan_id + i;
915 		attr.dst = priv->dev->dev_addr;
916 		attr.sport = 9;
917 		attr.dport = 9;
918 
919 		skb = stmmac_test_get_udp_skb(priv, &attr);
920 		if (!skb) {
921 			ret = -ENOMEM;
922 			goto vlan_del;
923 		}
924 
925 		skb_set_queue_mapping(skb, 0);
926 		ret = dev_queue_xmit(skb);
927 		if (ret)
928 			goto vlan_del;
929 
930 		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
931 		ret = tpriv->ok ? 0 : -ETIMEDOUT;
932 		if (ret && !i) {
933 			goto vlan_del;
934 		} else if (!ret && i) {
935 			ret = -EINVAL;
936 			goto vlan_del;
937 		} else {
938 			ret = 0;
939 		}
940 
941 		tpriv->ok = false;
942 	}
943 
944 vlan_del:
945 	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
946 cleanup:
947 	dev_remove_pack(&tpriv->pt);
948 	kfree(tpriv);
949 	return ret;
950 }
951 
952 static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
953 {
954 	if (!priv->dma_cap.vlhash)
955 		return -EOPNOTSUPP;
956 
957 	return __stmmac_test_vlanfilt(priv);
958 }
959 
960 static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
961 {
962 	int ret, prev_cap = priv->dma_cap.vlhash;
963 
964 	priv->dma_cap.vlhash = 0;
965 	ret = __stmmac_test_vlanfilt(priv);
966 	priv->dma_cap.vlhash = prev_cap;
967 
968 	return ret;
969 }
970 
971 static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
972 {
973 	struct stmmac_packet_attrs attr = { };
974 	struct stmmac_test_priv *tpriv;
975 	struct sk_buff *skb = NULL;
976 	int ret = 0, i;
977 
978 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
979 	if (!tpriv)
980 		return -ENOMEM;
981 
982 	tpriv->ok = false;
983 	tpriv->double_vlan = true;
984 	init_completion(&tpriv->comp);
985 
986 	tpriv->pt.type = htons(ETH_P_8021Q);
987 	tpriv->pt.func = stmmac_test_vlan_validate;
988 	tpriv->pt.dev = priv->dev;
989 	tpriv->pt.af_packet_priv = tpriv;
990 	tpriv->packet = &attr;
991 
	/*
	 * As we use HASH filtering, false positives may appear. The ID is
	 * specially chosen so that the nearby IDs used below (+1 to +3)
	 * map to different HASH values.
	 */
997 	tpriv->vlan_id = 0x123;
998 	dev_add_pack(&tpriv->pt);
999 
1000 	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1001 	if (ret)
1002 		goto cleanup;
1003 
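	/* Only the first iteration uses the VID programmed in the filter;
	 * frames with VID +1..+3 must be dropped.
	 */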
1004 	for (i = 0; i < 4; i++) {
1005 		attr.vlan = 2;
1006 		attr.vlan_id_out = tpriv->vlan_id + i;
1007 		attr.dst = priv->dev->dev_addr;
1008 		attr.sport = 9;
1009 		attr.dport = 9;
1010 
1011 		skb = stmmac_test_get_udp_skb(priv, &attr);
1012 		if (!skb) {
1013 			ret = -ENOMEM;
1014 			goto vlan_del;
1015 		}
1016 
1017 		skb_set_queue_mapping(skb, 0);
1018 		ret = dev_queue_xmit(skb);
1019 		if (ret)
1020 			goto vlan_del;
1021 
1022 		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1023 		ret = tpriv->ok ? 0 : -ETIMEDOUT;
1024 		if (ret && !i) {
1025 			goto vlan_del;
1026 		} else if (!ret && i) {
1027 			ret = -EINVAL;
1028 			goto vlan_del;
1029 		} else {
1030 			ret = 0;
1031 		}
1032 
1033 		tpriv->ok = false;
1034 	}
1035 
1036 vlan_del:
1037 	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
1038 cleanup:
1039 	dev_remove_pack(&tpriv->pt);
1040 	kfree(tpriv);
1041 	return ret;
1042 }
1043 
1044 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
1045 {
1046 	if (!priv->dma_cap.vlhash)
1047 		return -EOPNOTSUPP;
1048 
1049 	return __stmmac_test_dvlanfilt(priv);
1050 }
1051 
1052 static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
1053 {
1054 	int ret, prev_cap = priv->dma_cap.vlhash;
1055 
1056 	priv->dma_cap.vlhash = 0;
1057 	ret = __stmmac_test_dvlanfilt(priv);
1058 	priv->dma_cap.vlhash = prev_cap;
1059 
1060 	return ret;
1061 }
1062 
1063 #ifdef CONFIG_NET_CLS_ACT
1064 static int stmmac_test_rxp(struct stmmac_priv *priv)
1065 {
1066 	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
1067 	struct tc_cls_u32_offload cls_u32 = { };
1068 	struct stmmac_packet_attrs attr = { };
1069 	struct tc_action **actions, *act;
1070 	struct tc_u32_sel *sel;
1071 	struct tcf_exts *exts;
1072 	int ret, i, nk = 1;
1073 
1074 	if (!tc_can_offload(priv->dev))
1075 		return -EOPNOTSUPP;
1076 	if (!priv->dma_cap.frpsel)
1077 		return -EOPNOTSUPP;
1078 
1079 	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
1080 	if (!sel)
1081 		return -ENOMEM;
1082 
1083 	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
1084 	if (!exts) {
1085 		ret = -ENOMEM;
1086 		goto cleanup_sel;
1087 	}
1088 
1089 	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
1090 	if (!actions) {
1091 		ret = -ENOMEM;
1092 		goto cleanup_exts;
1093 	}
1094 
1095 	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
1096 	if (!act) {
1097 		ret = -ENOMEM;
1098 		goto cleanup_actions;
1099 	}
1100 
1101 	cls_u32.command = TC_CLSU32_NEW_KNODE;
1102 	cls_u32.common.chain_index = 0;
1103 	cls_u32.common.protocol = htons(ETH_P_ALL);
1104 	cls_u32.knode.exts = exts;
1105 	cls_u32.knode.sel = sel;
1106 	cls_u32.knode.handle = 0x123;
1107 
1108 	exts->nr_actions = nk;
1109 	exts->actions = actions;
1110 	for (i = 0; i < nk; i++) {
1111 		struct tcf_gact *gact = to_gact(&act[i]);
1112 
1113 		actions[i] = &act[i];
1114 		gact->tcf_action = TC_ACT_SHOT;
1115 	}
1116 
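	/* Single key: match 0xdeadbeef at offset 6, i.e. the first four
	 * bytes of the source MAC, and drop matching frames (TC_ACT_SHOT).
	 */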
1117 	sel->nkeys = nk;
1118 	sel->offshift = 0;
1119 	sel->keys[0].off = 6;
1120 	sel->keys[0].val = htonl(0xdeadbeef);
1121 	sel->keys[0].mask = ~0x0;
1122 
1123 	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1124 	if (ret)
1125 		goto cleanup_act;
1126 
1127 	attr.dst = priv->dev->dev_addr;
1128 	attr.src = addr;
1129 
1130 	ret = __stmmac_test_loopback(priv, &attr);
1131 	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
1132 
1133 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
1134 	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
1135 
1136 cleanup_act:
1137 	kfree(act);
1138 cleanup_actions:
1139 	kfree(actions);
1140 cleanup_exts:
1141 	kfree(exts);
1142 cleanup_sel:
1143 	kfree(sel);
1144 	return ret;
1145 }
1146 #else
1147 static int stmmac_test_rxp(struct stmmac_priv *priv)
1148 {
1149 	return -EOPNOTSUPP;
1150 }
1151 #endif
1152 
1153 static int stmmac_test_desc_sai(struct stmmac_priv *priv)
1154 {
1155 	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1156 	struct stmmac_packet_attrs attr = { };
1157 	int ret;
1158 
1159 	if (!priv->dma_cap.vlins)
1160 		return -EOPNOTSUPP;
1161 
1162 	attr.remove_sa = true;
1163 	attr.sarc = true;
1164 	attr.src = src;
1165 	attr.dst = priv->dev->dev_addr;
1166 
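	/* Per-descriptor SA control: 0x1 requests source address insertion */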
1167 	priv->sarc_type = 0x1;
1168 
1169 	ret = __stmmac_test_loopback(priv, &attr);
1170 
1171 	priv->sarc_type = 0x0;
1172 	return ret;
1173 }
1174 
1175 static int stmmac_test_desc_sar(struct stmmac_priv *priv)
1176 {
1177 	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1178 	struct stmmac_packet_attrs attr = { };
1179 	int ret;
1180 
1181 	if (!priv->dma_cap.vlins)
1182 		return -EOPNOTSUPP;
1183 
1184 	attr.sarc = true;
1185 	attr.src = src;
1186 	attr.dst = priv->dev->dev_addr;
1187 
1188 	priv->sarc_type = 0x2;
1189 
1190 	ret = __stmmac_test_loopback(priv, &attr);
1191 
1192 	priv->sarc_type = 0x0;
1193 	return ret;
1194 }
1195 
1196 static int stmmac_test_reg_sai(struct stmmac_priv *priv)
1197 {
1198 	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1199 	struct stmmac_packet_attrs attr = { };
1200 	int ret;
1201 
1202 	if (!priv->dma_cap.vlins)
1203 		return -EOPNOTSUPP;
1204 
1205 	attr.remove_sa = true;
1206 	attr.sarc = true;
1207 	attr.src = src;
1208 	attr.dst = priv->dev->dev_addr;
1209 
1210 	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
1211 		return -EOPNOTSUPP;
1212 
1213 	ret = __stmmac_test_loopback(priv, &attr);
1214 
1215 	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1216 	return ret;
1217 }
1218 
1219 static int stmmac_test_reg_sar(struct stmmac_priv *priv)
1220 {
1221 	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1222 	struct stmmac_packet_attrs attr = { };
1223 	int ret;
1224 
1225 	if (!priv->dma_cap.vlins)
1226 		return -EOPNOTSUPP;
1227 
1228 	attr.sarc = true;
1229 	attr.src = src;
1230 	attr.dst = priv->dev->dev_addr;
1231 
1232 	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
1233 		return -EOPNOTSUPP;
1234 
1235 	ret = __stmmac_test_loopback(priv, &attr);
1236 
1237 	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
1238 	return ret;
1239 }
1240 
1241 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
1242 {
1243 	struct stmmac_packet_attrs attr = { };
1244 	struct stmmac_test_priv *tpriv;
1245 	struct sk_buff *skb = NULL;
1246 	int ret = 0;
1247 	u16 proto;
1248 
1249 	if (!priv->dma_cap.vlins)
1250 		return -EOPNOTSUPP;
1251 
1252 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1253 	if (!tpriv)
1254 		return -ENOMEM;
1255 
1256 	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
1257 
1258 	tpriv->ok = false;
1259 	tpriv->double_vlan = svlan;
1260 	init_completion(&tpriv->comp);
1261 
1262 	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
1263 	tpriv->pt.func = stmmac_test_vlan_validate;
1264 	tpriv->pt.dev = priv->dev;
1265 	tpriv->pt.af_packet_priv = tpriv;
1266 	tpriv->packet = &attr;
1267 	tpriv->vlan_id = 0x123;
1268 	dev_add_pack(&tpriv->pt);
1269 
1270 	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
1271 	if (ret)
1272 		goto cleanup;
1273 
1274 	attr.dst = priv->dev->dev_addr;
1275 
1276 	skb = stmmac_test_get_udp_skb(priv, &attr);
1277 	if (!skb) {
1278 		ret = -ENOMEM;
1279 		goto vlan_del;
1280 	}
1281 
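	/* Hand the tag to the driver via hwaccel so the MAC inserts it on TX */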
1282 	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
1283 	skb->protocol = htons(proto);
1284 
1285 	skb_set_queue_mapping(skb, 0);
1286 	ret = dev_queue_xmit(skb);
1287 	if (ret)
1288 		goto vlan_del;
1289 
1290 	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1291 	ret = tpriv->ok ? 0 : -ETIMEDOUT;
1292 
1293 vlan_del:
1294 	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
1295 cleanup:
1296 	dev_remove_pack(&tpriv->pt);
1297 	kfree(tpriv);
1298 	return ret;
1299 }
1300 
1301 static int stmmac_test_vlanoff(struct stmmac_priv *priv)
1302 {
1303 	return stmmac_test_vlanoff_common(priv, false);
1304 }
1305 
1306 static int stmmac_test_svlanoff(struct stmmac_priv *priv)
1307 {
1308 	if (!priv->dma_cap.dvlan)
1309 		return -EOPNOTSUPP;
1310 	return stmmac_test_vlanoff_common(priv, true);
1311 }
1312 
1313 #ifdef CONFIG_NET_CLS_ACT
1314 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1315 				u32 dst_mask, u32 src_mask)
1316 {
1317 	struct flow_dissector_key_ipv4_addrs key, mask;
1318 	unsigned long dummy_cookie = 0xdeadbeef;
1319 	struct stmmac_packet_attrs attr = { };
1320 	struct flow_dissector *dissector;
1321 	struct flow_cls_offload *cls;
1322 	struct flow_rule *rule;
1323 	int ret;
1324 
1325 	if (!tc_can_offload(priv->dev))
1326 		return -EOPNOTSUPP;
1327 	if (!priv->dma_cap.l3l4fnum)
1328 		return -EOPNOTSUPP;
1329 	if (priv->rss.enable)
1330 		stmmac_rss_configure(priv, priv->hw, NULL,
1331 				     priv->plat->rx_queues_to_use);
1332 
1333 	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1334 	if (!dissector) {
1335 		ret = -ENOMEM;
1336 		goto cleanup_rss;
1337 	}
1338 
1339 	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
1340 	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
1341 
1342 	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1343 	if (!cls) {
1344 		ret = -ENOMEM;
1345 		goto cleanup_dissector;
1346 	}
1347 
1348 	cls->common.chain_index = 0;
1349 	cls->command = FLOW_CLS_REPLACE;
1350 	cls->cookie = dummy_cookie;
1351 
1352 	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1353 	if (!rule) {
1354 		ret = -ENOMEM;
1355 		goto cleanup_cls;
1356 	}
1357 
1358 	rule->match.dissector = dissector;
1359 	rule->match.key = (void *)&key;
1360 	rule->match.mask = (void *)&mask;
1361 
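	/* Match the L3 source/destination given by the caller; a zero mask
	 * ignores that field.
	 */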
1362 	key.src = htonl(src);
1363 	key.dst = htonl(dst);
1364 	mask.src = src_mask;
1365 	mask.dst = dst_mask;
1366 
1367 	cls->rule = rule;
1368 
1369 	rule->action.entries[0].id = FLOW_ACTION_DROP;
1370 	rule->action.num_entries = 1;
1371 
1372 	attr.dst = priv->dev->dev_addr;
1373 	attr.ip_dst = dst;
1374 	attr.ip_src = src;
1375 
1376 	/* Shall receive packet */
1377 	ret = __stmmac_test_loopback(priv, &attr);
1378 	if (ret)
1379 		goto cleanup_rule;
1380 
1381 	ret = stmmac_tc_setup_cls(priv, priv, cls);
1382 	if (ret)
1383 		goto cleanup_rule;
1384 
1385 	/* Shall NOT receive packet */
1386 	ret = __stmmac_test_loopback(priv, &attr);
1387 	ret = ret ? 0 : -EINVAL;
1388 
1389 	cls->command = FLOW_CLS_DESTROY;
1390 	stmmac_tc_setup_cls(priv, priv, cls);
1391 cleanup_rule:
1392 	kfree(rule);
1393 cleanup_cls:
1394 	kfree(cls);
1395 cleanup_dissector:
1396 	kfree(dissector);
1397 cleanup_rss:
1398 	if (priv->rss.enable) {
1399 		stmmac_rss_configure(priv, priv->hw, &priv->rss,
1400 				     priv->plat->rx_queues_to_use);
1401 	}
1402 
1403 	return ret;
1404 }
1405 #else
1406 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
1407 				u32 dst_mask, u32 src_mask)
1408 {
1409 	return -EOPNOTSUPP;
1410 }
1411 #endif
1412 
1413 static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
1414 {
1415 	u32 addr = 0x10203040;
1416 
1417 	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
1418 }
1419 
1420 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
1421 {
1422 	u32 addr = 0x10203040;
1423 
1424 	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
1425 }
1426 
1427 #ifdef CONFIG_NET_CLS_ACT
1428 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1429 				u32 dst_mask, u32 src_mask, bool udp)
1430 {
1431 	struct {
1432 		struct flow_dissector_key_basic bkey;
1433 		struct flow_dissector_key_ports key;
1434 	} __aligned(BITS_PER_LONG / 8) keys;
1435 	struct {
1436 		struct flow_dissector_key_basic bmask;
1437 		struct flow_dissector_key_ports mask;
1438 	} __aligned(BITS_PER_LONG / 8) masks;
1439 	unsigned long dummy_cookie = 0xdeadbeef;
1440 	struct stmmac_packet_attrs attr = { };
1441 	struct flow_dissector *dissector;
1442 	struct flow_cls_offload *cls;
1443 	struct flow_rule *rule;
1444 	int ret;
1445 
1446 	if (!tc_can_offload(priv->dev))
1447 		return -EOPNOTSUPP;
1448 	if (!priv->dma_cap.l3l4fnum)
1449 		return -EOPNOTSUPP;
1450 	if (priv->rss.enable)
1451 		stmmac_rss_configure(priv, priv->hw, NULL,
1452 				     priv->plat->rx_queues_to_use);
1453 
1454 	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
1455 	if (!dissector) {
1456 		ret = -ENOMEM;
1457 		goto cleanup_rss;
1458 	}
1459 
1460 	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
1461 	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
1462 	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
1463 	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
1464 
1465 	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
1466 	if (!cls) {
1467 		ret = -ENOMEM;
1468 		goto cleanup_dissector;
1469 	}
1470 
1471 	cls->common.chain_index = 0;
1472 	cls->command = FLOW_CLS_REPLACE;
1473 	cls->cookie = dummy_cookie;
1474 
1475 	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
1476 	if (!rule) {
1477 		ret = -ENOMEM;
1478 		goto cleanup_cls;
1479 	}
1480 
1481 	rule->match.dissector = dissector;
1482 	rule->match.key = (void *)&keys;
1483 	rule->match.mask = (void *)&masks;
1484 
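	/* Match the L4 protocol plus the requested source/destination port */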
1485 	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
1486 	keys.key.src = htons(src);
1487 	keys.key.dst = htons(dst);
1488 	masks.mask.src = src_mask;
1489 	masks.mask.dst = dst_mask;
1490 
1491 	cls->rule = rule;
1492 
1493 	rule->action.entries[0].id = FLOW_ACTION_DROP;
1494 	rule->action.num_entries = 1;
1495 
1496 	attr.dst = priv->dev->dev_addr;
1497 	attr.tcp = !udp;
1498 	attr.sport = src;
1499 	attr.dport = dst;
1500 	attr.ip_dst = 0;
1501 
1502 	/* Shall receive packet */
1503 	ret = __stmmac_test_loopback(priv, &attr);
1504 	if (ret)
1505 		goto cleanup_rule;
1506 
1507 	ret = stmmac_tc_setup_cls(priv, priv, cls);
1508 	if (ret)
1509 		goto cleanup_rule;
1510 
1511 	/* Shall NOT receive packet */
1512 	ret = __stmmac_test_loopback(priv, &attr);
1513 	ret = ret ? 0 : -EINVAL;
1514 
1515 	cls->command = FLOW_CLS_DESTROY;
1516 	stmmac_tc_setup_cls(priv, priv, cls);
1517 cleanup_rule:
1518 	kfree(rule);
1519 cleanup_cls:
1520 	kfree(cls);
1521 cleanup_dissector:
1522 	kfree(dissector);
1523 cleanup_rss:
1524 	if (priv->rss.enable) {
1525 		stmmac_rss_configure(priv, priv->hw, &priv->rss,
1526 				     priv->plat->rx_queues_to_use);
1527 	}
1528 
1529 	return ret;
1530 }
1531 #else
1532 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
1533 				u32 dst_mask, u32 src_mask, bool udp)
1534 {
1535 	return -EOPNOTSUPP;
1536 }
1537 #endif
1538 
1539 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
1540 {
1541 	u16 dummy_port = 0x123;
1542 
1543 	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
1544 }
1545 
1546 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
1547 {
1548 	u16 dummy_port = 0x123;
1549 
1550 	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
1551 }
1552 
1553 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
1554 {
1555 	u16 dummy_port = 0x123;
1556 
1557 	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
1558 }
1559 
1560 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
1561 {
1562 	u16 dummy_port = 0x123;
1563 
1564 	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
1565 }
1566 
1567 static int stmmac_test_arp_validate(struct sk_buff *skb,
1568 				    struct net_device *ndev,
1569 				    struct packet_type *pt,
1570 				    struct net_device *orig_ndev)
1571 {
1572 	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
1573 	struct ethhdr *ehdr;
1574 	struct arphdr *ahdr;
1575 
1576 	ehdr = (struct ethhdr *)skb_mac_header(skb);
1577 	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
1578 		goto out;
1579 
1580 	ahdr = arp_hdr(skb);
1581 	if (ahdr->ar_op != htons(ARPOP_REPLY))
1582 		goto out;
1583 
1584 	tpriv->ok = true;
1585 	complete(&tpriv->comp);
1586 out:
1587 	kfree_skb(skb);
1588 	return 0;
1589 }
1590 
1591 static int stmmac_test_arpoffload(struct stmmac_priv *priv)
1592 {
1593 	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
1594 	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1595 	struct stmmac_packet_attrs attr = { };
1596 	struct stmmac_test_priv *tpriv;
1597 	struct sk_buff *skb = NULL;
1598 	u32 ip_addr = 0xdeadcafe;
1599 	u32 ip_src = 0xdeadbeef;
1600 	int ret;
1601 
1602 	if (!priv->dma_cap.arpoffsel)
1603 		return -EOPNOTSUPP;
1604 
1605 	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
1606 	if (!tpriv)
1607 		return -ENOMEM;
1608 
1609 	tpriv->ok = false;
1610 	init_completion(&tpriv->comp);
1611 
1612 	tpriv->pt.type = htons(ETH_P_ARP);
1613 	tpriv->pt.func = stmmac_test_arp_validate;
1614 	tpriv->pt.dev = priv->dev;
1615 	tpriv->pt.af_packet_priv = tpriv;
1616 	tpriv->packet = &attr;
1617 	dev_add_pack(&tpriv->pt);
1618 
1619 	attr.src = src;
1620 	attr.ip_src = ip_src;
1621 	attr.dst = dst;
1622 	attr.ip_dst = ip_addr;
1623 
1624 	skb = stmmac_test_get_arp_skb(priv, &attr);
1625 	if (!skb) {
1626 		ret = -ENOMEM;
1627 		goto cleanup;
1628 	}
1629 
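	/* Offload the target IP to the MAC so it answers the ARP request
	 * by itself; the handler then waits for the ARPOP_REPLY.
	 */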
1630 	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
1631 	if (ret)
1632 		goto cleanup;
1633 
1634 	ret = dev_set_promiscuity(priv->dev, 1);
1635 	if (ret)
1636 		goto cleanup;
1637 
1638 	skb_set_queue_mapping(skb, 0);
1639 	ret = dev_queue_xmit(skb);
1640 	if (ret)
1641 		goto cleanup_promisc;
1642 
1643 	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
1644 	ret = tpriv->ok ? 0 : -ETIMEDOUT;
1645 
1646 cleanup_promisc:
1647 	dev_set_promiscuity(priv->dev, -1);
1648 cleanup:
1649 	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
1650 	dev_remove_pack(&tpriv->pt);
1651 	kfree(tpriv);
1652 	return ret;
1653 }
1654 
1655 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
1656 {
1657 	struct stmmac_packet_attrs attr = { };
1658 	int size = priv->dma_buf_sz;
1659 
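	/* Largest frame that still fits one RX buffer once the FCS is added */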
1660 	attr.dst = priv->dev->dev_addr;
1661 	attr.max_size = size - ETH_FCS_LEN;
1662 	attr.queue_mapping = queue;
1663 
1664 	return __stmmac_test_loopback(priv, &attr);
1665 }
1666 
1667 static int stmmac_test_jumbo(struct stmmac_priv *priv)
1668 {
1669 	return __stmmac_test_jumbo(priv, 0);
1670 }
1671 
1672 static int stmmac_test_mjumbo(struct stmmac_priv *priv)
1673 {
1674 	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
1675 	int ret;
1676 
1677 	if (tx_cnt <= 1)
1678 		return -EOPNOTSUPP;
1679 
1680 	for (chan = 0; chan < tx_cnt; chan++) {
1681 		ret = __stmmac_test_jumbo(priv, chan);
1682 		if (ret)
1683 			return ret;
1684 	}
1685 
1686 	return 0;
1687 }
1688 
1689 static int stmmac_test_sph(struct stmmac_priv *priv)
1690 {
1691 	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
1692 	struct stmmac_packet_attrs attr = { };
1693 	int ret;
1694 
1695 	if (!priv->sph)
1696 		return -EOPNOTSUPP;
1697 
1698 	/* Check for UDP first */
1699 	attr.dst = priv->dev->dev_addr;
1700 	attr.tcp = false;
1701 
1702 	ret = __stmmac_test_loopback(priv, &attr);
1703 	if (ret)
1704 		return ret;
1705 
1706 	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1707 	if (cnt_end <= cnt_start)
1708 		return -EINVAL;
1709 
1710 	/* Check for TCP now */
1711 	cnt_start = cnt_end;
1712 
1713 	attr.dst = priv->dev->dev_addr;
1714 	attr.tcp = true;
1715 
1716 	ret = __stmmac_test_loopback(priv, &attr);
1717 	if (ret)
1718 		return ret;
1719 
1720 	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
1721 	if (cnt_end <= cnt_start)
1722 		return -EINVAL;
1723 
1724 	return 0;
1725 }
1726 
1727 #define STMMAC_LOOPBACK_NONE	0
1728 #define STMMAC_LOOPBACK_MAC	1
1729 #define STMMAC_LOOPBACK_PHY	2
1730 
1731 static const struct stmmac_test {
1732 	char name[ETH_GSTRING_LEN];
1733 	int lb;
1734 	int (*fn)(struct stmmac_priv *priv);
1735 } stmmac_selftests[] = {
1736 	{
1737 		.name = "MAC Loopback               ",
1738 		.lb = STMMAC_LOOPBACK_MAC,
1739 		.fn = stmmac_test_mac_loopback,
1740 	}, {
1741 		.name = "PHY Loopback               ",
1742 		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
1743 		.fn = stmmac_test_phy_loopback,
1744 	}, {
1745 		.name = "MMC Counters               ",
1746 		.lb = STMMAC_LOOPBACK_PHY,
1747 		.fn = stmmac_test_mmc,
1748 	}, {
1749 		.name = "EEE                        ",
1750 		.lb = STMMAC_LOOPBACK_PHY,
1751 		.fn = stmmac_test_eee,
1752 	}, {
1753 		.name = "Hash Filter MC             ",
1754 		.lb = STMMAC_LOOPBACK_PHY,
1755 		.fn = stmmac_test_hfilt,
1756 	}, {
1757 		.name = "Perfect Filter UC          ",
1758 		.lb = STMMAC_LOOPBACK_PHY,
1759 		.fn = stmmac_test_pfilt,
1760 	}, {
1761 		.name = "MC Filter                  ",
1762 		.lb = STMMAC_LOOPBACK_PHY,
1763 		.fn = stmmac_test_mcfilt,
1764 	}, {
1765 		.name = "UC Filter                  ",
1766 		.lb = STMMAC_LOOPBACK_PHY,
1767 		.fn = stmmac_test_ucfilt,
1768 	}, {
1769 		.name = "Flow Control               ",
1770 		.lb = STMMAC_LOOPBACK_PHY,
1771 		.fn = stmmac_test_flowctrl,
1772 	}, {
1773 		.name = "RSS                        ",
1774 		.lb = STMMAC_LOOPBACK_PHY,
1775 		.fn = stmmac_test_rss,
1776 	}, {
1777 		.name = "VLAN Filtering             ",
1778 		.lb = STMMAC_LOOPBACK_PHY,
1779 		.fn = stmmac_test_vlanfilt,
1780 	}, {
1781 		.name = "VLAN Filtering (perf)      ",
1782 		.lb = STMMAC_LOOPBACK_PHY,
1783 		.fn = stmmac_test_vlanfilt_perfect,
1784 	}, {
1785 		.name = "Double VLAN Filter         ",
1786 		.lb = STMMAC_LOOPBACK_PHY,
1787 		.fn = stmmac_test_dvlanfilt,
1788 	}, {
1789 		.name = "Double VLAN Filter (perf)  ",
1790 		.lb = STMMAC_LOOPBACK_PHY,
1791 		.fn = stmmac_test_dvlanfilt_perfect,
1792 	}, {
1793 		.name = "Flexible RX Parser         ",
1794 		.lb = STMMAC_LOOPBACK_PHY,
1795 		.fn = stmmac_test_rxp,
1796 	}, {
1797 		.name = "SA Insertion (desc)        ",
1798 		.lb = STMMAC_LOOPBACK_PHY,
1799 		.fn = stmmac_test_desc_sai,
1800 	}, {
1801 		.name = "SA Replacement (desc)      ",
1802 		.lb = STMMAC_LOOPBACK_PHY,
1803 		.fn = stmmac_test_desc_sar,
1804 	}, {
1805 		.name = "SA Insertion (reg)         ",
1806 		.lb = STMMAC_LOOPBACK_PHY,
1807 		.fn = stmmac_test_reg_sai,
1808 	}, {
1809 		.name = "SA Replacement (reg)       ",
1810 		.lb = STMMAC_LOOPBACK_PHY,
1811 		.fn = stmmac_test_reg_sar,
1812 	}, {
1813 		.name = "VLAN TX Insertion          ",
1814 		.lb = STMMAC_LOOPBACK_PHY,
1815 		.fn = stmmac_test_vlanoff,
1816 	}, {
1817 		.name = "SVLAN TX Insertion         ",
1818 		.lb = STMMAC_LOOPBACK_PHY,
1819 		.fn = stmmac_test_svlanoff,
1820 	}, {
1821 		.name = "L3 DA Filtering            ",
1822 		.lb = STMMAC_LOOPBACK_PHY,
1823 		.fn = stmmac_test_l3filt_da,
1824 	}, {
1825 		.name = "L3 SA Filtering            ",
1826 		.lb = STMMAC_LOOPBACK_PHY,
1827 		.fn = stmmac_test_l3filt_sa,
1828 	}, {
1829 		.name = "L4 DA TCP Filtering        ",
1830 		.lb = STMMAC_LOOPBACK_PHY,
1831 		.fn = stmmac_test_l4filt_da_tcp,
1832 	}, {
1833 		.name = "L4 SA TCP Filtering        ",
1834 		.lb = STMMAC_LOOPBACK_PHY,
1835 		.fn = stmmac_test_l4filt_sa_tcp,
1836 	}, {
1837 		.name = "L4 DA UDP Filtering        ",
1838 		.lb = STMMAC_LOOPBACK_PHY,
1839 		.fn = stmmac_test_l4filt_da_udp,
1840 	}, {
1841 		.name = "L4 SA UDP Filtering        ",
1842 		.lb = STMMAC_LOOPBACK_PHY,
1843 		.fn = stmmac_test_l4filt_sa_udp,
1844 	}, {
1845 		.name = "ARP Offload                ",
1846 		.lb = STMMAC_LOOPBACK_PHY,
1847 		.fn = stmmac_test_arpoffload,
1848 	}, {
1849 		.name = "Jumbo Frame                ",
1850 		.lb = STMMAC_LOOPBACK_PHY,
1851 		.fn = stmmac_test_jumbo,
1852 	}, {
1853 		.name = "Multichannel Jumbo         ",
1854 		.lb = STMMAC_LOOPBACK_PHY,
1855 		.fn = stmmac_test_mjumbo,
1856 	}, {
1857 		.name = "Split Header               ",
1858 		.lb = STMMAC_LOOPBACK_PHY,
1859 		.fn = stmmac_test_sph,
1860 	},
1861 };
1862 
1863 void stmmac_selftest_run(struct net_device *dev,
1864 			 struct ethtool_test *etest, u64 *buf)
1865 {
1866 	struct stmmac_priv *priv = netdev_priv(dev);
1867 	int count = stmmac_selftest_get_count(priv);
1868 	int carrier = netif_carrier_ok(dev);
1869 	int i, ret;
1870 
1871 	memset(buf, 0, sizeof(*buf) * count);
1872 	stmmac_test_next_id = 0;
1873 
1874 	if (etest->flags != ETH_TEST_FL_OFFLINE) {
1875 		netdev_err(priv->dev, "Only offline tests are supported\n");
1876 		etest->flags |= ETH_TEST_FL_FAILED;
1877 		return;
1878 	} else if (!carrier) {
		netdev_err(priv->dev, "A valid link is needed to execute tests\n");
1880 		etest->flags |= ETH_TEST_FL_FAILED;
1881 		return;
1882 	}
1883 
1884 	/* We don't want extra traffic */
1885 	netif_carrier_off(dev);
1886 
	/* Wait for the queues to drain */
1888 	msleep(200);
1889 
1890 	for (i = 0; i < count; i++) {
1891 		ret = 0;
1892 
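		/* Bring up the loopback level this test expects; PHY
		 * loopback falls back to MAC loopback when no PHY is
		 * attached.
		 */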
1893 		switch (stmmac_selftests[i].lb) {
1894 		case STMMAC_LOOPBACK_PHY:
1895 			ret = -EOPNOTSUPP;
1896 			if (dev->phydev)
1897 				ret = phy_loopback(dev->phydev, true);
1898 			if (!ret)
1899 				break;
1900 			/* Fallthrough */
1901 		case STMMAC_LOOPBACK_MAC:
1902 			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
1903 			break;
1904 		case STMMAC_LOOPBACK_NONE:
1905 			break;
1906 		default:
1907 			ret = -EOPNOTSUPP;
1908 			break;
1909 		}
1910 
		/*
		 * The first tests are always MAC / PHY loopback. If either
		 * of them is not supported we abort early.
		 */
1915 		if (ret) {
1916 			netdev_err(priv->dev, "Loopback is not supported\n");
1917 			etest->flags |= ETH_TEST_FL_FAILED;
1918 			break;
1919 		}
1920 
1921 		ret = stmmac_selftests[i].fn(priv);
1922 		if (ret && (ret != -EOPNOTSUPP))
1923 			etest->flags |= ETH_TEST_FL_FAILED;
1924 		buf[i] = ret;
1925 
1926 		switch (stmmac_selftests[i].lb) {
1927 		case STMMAC_LOOPBACK_PHY:
1928 			ret = -EOPNOTSUPP;
1929 			if (dev->phydev)
1930 				ret = phy_loopback(dev->phydev, false);
1931 			if (!ret)
1932 				break;
1933 			/* Fallthrough */
1934 		case STMMAC_LOOPBACK_MAC:
1935 			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
1936 			break;
1937 		default:
1938 			break;
1939 		}
1940 	}
1941 
1942 	/* Restart everything */
1943 	if (carrier)
1944 		netif_carrier_on(dev);
1945 }
1946 
1947 void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
1948 {
1949 	u8 *p = data;
1950 	int i;
1951 
1952 	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
1953 		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
1954 			 stmmac_selftests[i].name);
1955 		p += ETH_GSTRING_LEN;
1956 	}
1957 }
1958 
1959 int stmmac_selftest_get_count(struct stmmac_priv *priv)
1960 {
1961 	return ARRAY_SIZE(stmmac_selftests);
1962 }
1963