// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/completion.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

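/*
 * Private header appended after the transport header of every generated
 * test frame. The RX validators match on @magic and @id to tell our own
 * looped-back packets apart from unrelated traffic.
 */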
struct stmmachdr {
	__be32 version;
	__be64 magic;
	u8 id;
} __packed;

#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC	0xdeadcafecafedeadULL
#define STMMAC_LB_TIMEOUT	msecs_to_jiffies(200)

struct stmmac_packet_attrs {
	int vlan;
	int vlan_id_in;
	int vlan_id_out;
	unsigned char *src;
	unsigned char *dst;
	u32 ip_src;
	u32 ip_dst;
	int tcp;
	int sport;
	int dport;
	u32 exp_hash;
	int dont_wait;
	int timeout;
	int size;
	int max_size;
	int remove_sa;
	u8 id;
	int sarc;
	u16 queue_mapping;
};

static u8 stmmac_test_next_id;

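/*
 * Build a complete Ethernet/IP/UDP (or TCP) test frame according to @attr:
 * optional single or double VLAN tags, optional source address removal,
 * the stmmachdr test header and any requested padding. Checksums are left
 * to the hardware (CHECKSUM_PARTIAL).
 */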
static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;
		if (attr->vlan > 1)
			size += 4;
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (attr->max_size && (attr->max_size > size))
		size = attr->max_size;

	skb = netdev_alloc_skb_ip_align(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);

	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/* HACK */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		if (attr->max_size)
			uhdr->len = htons(attr->max_size -
					  (sizeof(*ihdr) + sizeof(*ehdr)));
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	if (attr->max_size)
		iplen = attr->max_size - sizeof(*ehdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);
	if (attr->max_size && (attr->max_size > skb->len))
		skb_put(skb, attr->max_size - skb->len);

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 ip_src = htonl(attr->ip_src);
	__be32 ip_dst = htonl(attr->ip_dst);
	struct sk_buff *skb = NULL;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;
	struct packet_type pt;
	struct completion comp;
	int double_vlan;
	int vlan_id;
	int ok;
};

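/*
 * packet_type handler registered by __stmmac_test_loopback(): check that a
 * received frame matches the attributes of the packet we just transmitted
 * (addresses, L4 protocol, destination port, magic and id) and, if so,
 * signal the completion so the sender stops waiting.
 */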
static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (tpriv->packet->dst) {
		if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (tpriv->packet->src) {
		if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

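/*
 * Core loopback helper used by most tests: register the validation handler
 * (unless @attr->dont_wait), transmit a single UDP test frame on the
 * requested queue and wait up to @attr->timeout for it to come back.
 */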
static int __stmmac_test_loopback(struct stmmac_priv *priv,
				  struct stmmac_packet_attrs *attr)
{
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, attr->queue_mapping);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	phy_loopback(priv->dev->phydev, false);
	return ret;
}

static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* There is no traffic on the line, so sooner or later it will enter LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

static int stmmac_filter_check(struct stmmac_priv *priv)
{
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
	unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
	struct stmmac_packet_attrs attr = { };
	int ret;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
{
	return 0;
}

static void stmmac_test_set_rx_mode(struct net_device *netdev)
{
	/* As we are in ethtool test mode we already hold the rtnl lock,
	 * so no address can be changed from user space. We can therefore
	 * call the ndo_set_rx_mode() callback directly.
	 */
	if (netdev->netdev_ops->ndo_set_rx_mode)
		netdev->netdev_ops->ndo_set_rx_mode(netdev);
}

static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	/* Remove all MC addresses */
	__dev_mc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	__dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	/* Remove all UC addresses */
	__dev_uc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	__dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

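/*
 * Flow control test: stop the RX DMA channels, push enough loopback traffic
 * to fill the RX FIFO, then restart RX and wait for the MAC to emit a PAUSE
 * frame, which the validator above detects by source address and EtherType.
 */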
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || (!phydev->pause && !phydev->asym_pause))
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait = true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];
		u32 tail;

		tail = priv->rx_queue[i].dma_rx_phy +
			(DMA_RX_SIZE * sizeof(struct dma_desc));

		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
		stmmac_start_rx(priv, priv->ioaddr, i);

		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id)
			goto out;
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
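/*
 * Flexible RX Parser test: build a minimal cls_u32 rule that drops frames
 * whose bytes at offset 6 (the source MAC prefix used below) match
 * 0xdeadbeef, program it through stmmac_tc_setup_cls_u32() and verify that
 * a matching loopback packet is no longer received.
 */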
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;
	}

	sel->nkeys = nk;
	sel->offshift = 0;
	sel->keys[0].off = 6;
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto vlan_del;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#ifdef CONFIG_NET_CLS_ACT
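/*
 * L3 filter test helper: build a flow_cls_offload rule matching on IPv4
 * source/destination addresses with a FLOW_ACTION_DROP action. The test
 * packet must be received before the rule is installed and dropped once it
 * is active. RSS is temporarily disabled while the test runs.
 */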
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	key.src = htonl(src);
	key.dst = htonl(dst);
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif

static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
}

static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
}

#ifdef CONFIG_NET_CLS_ACT
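/*
 * Same idea as the L3 helper above, but matching on TCP/UDP source and
 * destination ports (FLOW_DISSECTOR_KEY_BASIC + FLOW_DISSECTOR_KEY_PORTS)
 * with a drop action: the test packet must be received before the rule is
 * installed and dropped once it is active.
 */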
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&keys;
	rule->match.mask = (void *)&masks;

	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
	keys.key.src = htons(src);
	keys.key.dst = htons(dst);
	masks.mask.src = src_mask;
	masks.mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = !udp;
	attr.sport = src;
	attr.dport = dst;
	attr.ip_dst = 0;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	return -EOPNOTSUPP;
}
#endif

static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
}

static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
}

static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
}

static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
}

static int stmmac_test_arp_validate(struct sk_buff *skb,
				    struct net_device *ndev,
				    struct packet_type *pt,
				    struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
		goto out;

	ahdr = arp_hdr(skb);
	if (ahdr->ar_op != htons(ARPOP_REPLY))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

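/*
 * ARP Offload test: program @ip_addr into the ARP offload engine, go
 * promiscuous and transmit an ARP request for that address in loopback.
 * The test passes if the hardware answers with an ARP reply on its own,
 * which the validator above matches by destination MAC and ARP opcode.
 */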
static int stmmac_test_arpoffload(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;
	u32 ip_src = 0xdeadbeef;
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret)
		goto cleanup;

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_packet_attrs attr = { };
	int size = priv->dma_buf_sz;

	/* Only XGMAC has SW support for multiple RX descs in same packet */
	if (priv->plat->has_xgmac)
		size = priv->dev->max_mtu;

	attr.dst = priv->dev->dev_addr;
	attr.max_size = size - ETH_FCS_LEN;
	attr.queue_mapping = queue;

	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}

static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int stmmac_test_sph(struct stmmac_priv *priv)
{
	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->sph)
		return -EOPNOTSUPP;

	/* Check for UDP first */
	attr.dst = priv->dev->dev_addr;
	attr.tcp = false;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	/* Check for TCP now */
	cnt_start = cnt_end;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = true;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	return 0;
}

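/*
 * Table of all selftests exposed through ethtool. Each entry names the
 * test, states which loopback mode (none, MAC or PHY) must be set up
 * before the handler runs, and points to the handler itself.
 */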
#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];
	int lb;
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback         ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback         ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters         ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE                  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC       ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC    ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter            ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter            ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control         ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS                  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering       ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "Double VLAN Filtering",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Flexible RX Parser   ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc)  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg)  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion   ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering     ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering     ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload         ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame         ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo  ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header        ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	},
};

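/*
 * ethtool self-test entry point: only offline tests with a valid link are
 * supported. The carrier is dropped to keep extra traffic off the wire,
 * the required loopback mode is set up before each test and torn down
 * afterwards, and the per-test results are reported back in @buf.
 */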
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for the queues to drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * The first tests will always be MAC / PHY loopback. If any
		 * of them is not supported we abort early.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}