// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/bitrev.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

/* Marker header appended after the L4 header of every generated test frame
 * so the RX validation callbacks can recognize their own packets. */
struct stmmachdr {
	__be32 version;
	__be64 magic;	/* STMMAC_TEST_PKT_MAGIC */
	u8 id;		/* sequence number, matched against attr->id on RX */
} __packed;

#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC	0xdeadcafecafedeadULL
/* Default time the loopback tests wait for the looped-back frame. */
#define STMMAC_LB_TIMEOUT	msecs_to_jiffies(200)

/* Description of one generated test packet plus the checks the RX-side
 * validators must apply to it. */
struct stmmac_packet_attrs {
	int vlan;		/* number of VLAN tags to insert (0, 1 or 2) */
	int vlan_id_in;		/* inner VLAN ID (used when vlan == 2) */
	int vlan_id_out;	/* outer VLAN ID */
	unsigned char *src;	/* source MAC, NULL -> left zeroed */
	unsigned char *dst;	/* destination MAC, NULL -> left zeroed */
	u32 ip_src;
	u32 ip_dst;
	int tcp;		/* non-zero: build TCP, else UDP */
	int sport;
	int dport;
	u32 exp_hash;		/* expect a non-zero RSS hash on RX */
	int dont_wait;		/* transmit only, skip loopback wait */
	int timeout;		/* wait timeout in jiffies, 0 -> default */
	int size;		/* extra payload bytes after stmmachdr */
	int max_size;		/* if set, pad frame up to this total size */
	int remove_sa;		/* build the frame without a source MAC */
	u8 id;			/* id assigned to the generated packet */
	int sarc;		/* expect HW-provided SA == DA on RX */
	u16 queue_mapping;	/* TX queue to transmit on */
	u64 timestamp;		/* if set, TX timestamp (ns) for the skb */
};

static u8 stmmac_test_next_id;

/* Build a UDP (or TCP) test frame according to @attr.  Layout is
 * [eth][vlan tag(s)][ip][udp|tcp][stmmachdr][payload], prepared for
 * CHECKSUM_PARTIAL offload.  Returns NULL on allocation failure. */
static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;		/* one 802.1Q tag */
		if (attr->vlan > 1)
			size += 4;	/* second (outer) tag */
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (attr->max_size && (attr->max_size > size))
		size = attr->max_size;

	skb = netdev_alloc_skb(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);

	/* Reserve the Ethernet header: +4 bytes per VLAN tag, or -6 bytes
	 * when the source MAC is deliberately omitted (remove_sa). */
	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/* HACK: with no source MAC, the ethertype sits right after
		 * the 6-byte destination address, i.e. ptr[3]. */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			/* Double tag: outer 802.1ad, inner 802.1Q. */
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		if (attr->max_size)
			uhdr->len = htons(attr->max_size -
					  (sizeof(*ihdr) + sizeof(*ehdr)));
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	if (attr->max_size)
		iplen = attr->max_size - sizeof(*ehdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	/* Record the id in @attr so the RX validator can match this frame. */
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);
	if (attr->max_size && (attr->max_size > skb->len))
		skb_put(skb, attr->max_size - skb->len);

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	if (attr->timestamp)
		skb->tstamp = ns_to_ktime(attr->timestamp);

	return skb;
}

/* Build an ARP request frame; used by tests that need a non-IP packet. */
static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 ip_src = htonl(attr->ip_src);
	__be32 ip_dst = htonl(attr->ip_dst);
	struct sk_buff *skb = NULL;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/* Per-test context shared between the sender and the packet_type RX
 * callback that validates the looped-back frame. */
struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;	/* attrs the RX side checks */
	struct packet_type pt;			/* registered RX hook */
	struct completion comp;			/* completed on packet match */
	int double_vlan;			/* expect an extra VLAN header */
	int vlan_id;				/* expected VLAN ID, 0 = none */
	int ok;					/* result flag set by callback */
};

/* packet_type handler: check a received frame against tpriv->packet and
 * complete tpriv->comp on a match.  Always consumes the skb. */
static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	unsigned char *src = tpriv->packet->src;
	unsigned char *dst = tpriv->packet->dst;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (dst) {
		if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		/* SA replacement/insertion tests: HW must have set SA == DA. */
		if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (src) {
		if (!ether_addr_equal_unaligned(ehdr->h_source, src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		/* Inner VLAN tag still precedes the IP header: skip 4 bytes. */
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

static int
__stmmac_test_loopback(struct stmmac_priv *priv,
		       struct stmmac_packet_attrs *attr)
{
	/* Core loopback helper: register an RX hook (unless dont_wait),
	 * transmit one test frame described by @attr, then wait for the
	 * validator to flag a matching frame.  Returns 0 on success,
	 * -ETIMEDOUT when the frame never came back. */
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = dev_direct_xmit(skb, attr->queue_mapping);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loop one frame addressed to ourselves through the (MAC) loopback path. */
static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

/* Same loopback test, but with the PHY put in loopback mode instead. */
static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	/* Always undo loopback mode, even if the test failed. */
	phy_loopback(priv->dev->phydev, false);
	return ret;
}

/* Check that the MMC (RMON) counters advance when traffic is looped. */
static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

/* Verify that the TX path enters and exits LPI (EEE) after loopback
 * traffic stops, by polling the LPI interrupt statistics. */
static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* We have no traffic in the line so, sooner or later it will go LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

static int
stmmac_filter_check(struct stmmac_priv *priv)
{
	/* Filter tests are meaningless in promiscuous mode: everything
	 * would be received regardless of the filter configuration. */
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

/* Return true when @addr's multicast HASH bin does not collide with any
 * address already programmed on the device (false positive avoidance). */
static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
{
	int mc_offset = 32 - priv->hw->mcast_bits_log2;
	struct netdev_hw_addr *ha;
	u32 hash, hash_nr;

	/* First compute the hash for desired addr */
	hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
	hash_nr = hash >> 5;
	hash = 1 << (hash & 0x1f);

	/* Now, check if it collides with any existing one */
	netdev_for_each_mc_addr(ha, priv->dev) {
		u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
		if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/* Return true when @addr is not already present in the device's perfect
 * (exact-match) unicast filter list. */
static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
{
	struct netdev_hw_addr *ha;

	/* Check if it collides with any existing one */
	netdev_for_each_uc_addr(ha, priv->dev) {
		if (!memcmp(ha->addr, addr, ETH_ALEN))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/* HASH multicast filter test: a frame to a programmed multicast address
 * must be received, a frame to a non-colliding one must be dropped. */
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
	unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

/* Perfect (exact-match) unicast filter test: a programmed address must be
 * received, a non-programmed, non-colliding one must be dropped. */
static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ?
0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

/* Mixed filter test: with only a unicast entry programmed, a matching
 * unicast frame must pass and a multicast frame must be dropped. */
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the mc_addr for collisions */
		mc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, mc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		return ret;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	return ret;
}

/* Mirror of mcfilt: with only a multicast entry programmed, a matching
 * multicast frame must pass and a unicast frame must be dropped. */
static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the uc_addr for collisions */
		uc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, uc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		return ret;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	return ret;
}

/* packet_type handler for the flow-control test: flag success when a
 * PAUSE frame sourced by our own MAC is seen.  Always consumes the skb. */
static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/* Flow-control test: stop the RX queues, flood the RX FIFO with looped
 * frames, and expect the MAC to emit a PAUSE frame once the FIFO fills. */
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || (!phydev->pause && !phydev->asym_pause))
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	/* Subscribe the PAUSE multicast address so the frame reaches us. */
	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait =
true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	/* Restart the RX queues we stopped and kick their NAPI contexts so
	 * the backlogged frames get processed. */
	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];
		u32 tail;

		tail = priv->rx_queue[i].dma_rx_phy +
			(DMA_RX_SIZE * sizeof(struct dma_desc));

		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
		stmmac_start_rx(priv, priv->ioaddr, i);

		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* RSS test: loop one UDP frame and require a non-zero RX hash on it. */
static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

/* packet_type handler for the VLAN tests: validate protocol, VLAN tag
 * (when vlan_id is set), addresses, UDP port and magic.  A frame carrying
 * the wrong VLAN ID means the HW filter leaked it -> report failure. */
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id) {
			/* Means filter did not work. */
			tpriv->ok = false;
			complete(&tpriv->comp);
			goto out;
		}
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

/* VLAN filter test: only the frame carrying the subscribed VLAN ID (i == 0)
 * must be received; the three adjacent IDs must be filtered out. */
static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		ret = dev_direct_xmit(skb, 0);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* The subscribed ID must be received. */
			goto vlan_del;
		} else if (!ret && i) {
			/* A non-subscribed ID got through: filter failed. */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* VLAN HASH filter variant of the test. */
static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_vlanfilt(priv);
}

/* Perfect VLAN filter variant: temporarily mask the vlhash capability so
 * the driver falls back to exact matching, then restore it. */
static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		return -EOPNOTSUPP;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_vlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

/* Same VLAN filter test, but for double-tagged (802.1ad outer) frames. */
static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		ret = dev_direct_xmit(skb, 0);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* The subscribed ID must be received. */
			goto vlan_del;
		} else if (!ret && i) {
			/* A non-subscribed ID got through: filter failed. */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Double-VLAN HASH filter variant of the test. */
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_dvlanfilt(priv);
}

/* Perfect double-VLAN filter variant: temporarily mask vlhash so the
 * driver falls back to exact matching, then restore it. */
static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
		return -EOPNOTSUPP;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_dvlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
/* Flexible RX Parser test: install a cls_u32 drop rule matching 4 bytes
 * at offset 6 (the crafted source MAC 0xdeadbeef) and verify the frame is
 * NOT received, then remove the rule. */
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	/* One selector with nk trailing keys. */
	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;	/* drop on match */
	}

	sel->nkeys = nk;
	sel->offshift = 0;
	sel->keys[0].off = 6;	/* source MAC offset in the Ethernet header */
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

/* Per-descriptor Source Address Insertion: send with no SA and expect the
 * HW to insert one (validator checks SA == DA via attr.sarc). */
static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;	/* descriptor-based insert */

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/* Per-descriptor Source Address Replacement: send with a zeroed SA and
 * expect the HW to replace it (validator checks SA == DA). */
static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;	/* descriptor-based replace */

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/* Register-configured Source Address Insertion variant. */
static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* Register-configured Source Address Replacement variant. */
static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	/* Always restore the default SARC configuration. */
	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* VLAN TX offload test: hand the stack an untagged frame with an accel
 * VLAN tag (C-VLAN or, when @svlan, S-VLAN) and check it comes back with
 * the tag the HW inserted. */
static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	/* Tag via the accel path so the HW performs the insertion. */
	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	ret = dev_direct_xmit(skb, 0);
	if (ret)
		goto vlan_del;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* C-VLAN (802.1Q) insertion offload test. */
static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

/* S-VLAN (802.1ad) insertion offload test; needs double-VLAN support. */
static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#ifdef CONFIG_NET_CLS_ACT
/* L3 filter test helper: install a FLOW_CLS_REPLACE drop rule on the
 * given IPv4 src/dst (with masks) and verify matching traffic stops
 * being received.  RSS is temporarily disabled while the rule is active. */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	int ret, old_enable = 0;
	struct flow_rule *rule;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable) {
		/* RSS steering would defeat the filter check; disable it
		 * for the duration of the test. */
		old_enable = priv->rss.enable;
		priv->rss.enable = false;
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);
	}

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	/* Minimal dissector: only IPv4 addresses, keyed at offset 0 */
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	key.src = htonl(src);
	key.dst = htonl(dst);
	/* Masks are all-ones or zero at the call sites, so no byteswap */
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	if (old_enable) {
		priv->rss.enable = old_enable;
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif

static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	/* Match on destination address only */
	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
}

static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	/* Match on source address only */
	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
}

#ifdef CONFIG_NET_CLS_ACT
/* L4 filter test: same structure as __stmmac_test_l3filt() but the DROP rule
 * matches TCP/UDP source/destination ports (FLOW_DISSECTOR_KEY_BASIC selects
 * the IP protocol, FLOW_DISSECTOR_KEY_PORTS the ports).
 */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	int ret, old_enable = 0;
	struct flow_rule *rule;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable) {
		/* Park RSS; restored in cleanup_rss below */
		old_enable = priv->rss.enable;
		priv->rss.enable = false;
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);
	}

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&keys;
	rule->match.mask = (void *)&masks;

	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
	keys.key.src = htons(src);
	keys.key.dst = htons(dst);
	/* Masks are all-ones or zero at the call sites, so no byteswap */
	masks.mask.src = src_mask;
	masks.mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = !udp;
	attr.sport = src;
	attr.dport = dst;
	attr.ip_dst = 0;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	if (old_enable) {
		priv->rss.enable = old_enable;
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	return -EOPNOTSUPP;
}
#endif

static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
}

static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
}

static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
}

static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
}

/* packet_type handler for the ARP offload test: accept only an ARP reply
 * addressed to the fake station MAC (attr->src), i.e. the answer the
 * hardware is expected to generate. Always consumes the skb.
 */
static int stmmac_test_arp_validate(struct sk_buff *skb,
				    struct net_device *ndev,
				    struct packet_type *pt,
				    struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
		goto out;

	ahdr = arp_hdr(skb);
	if (ahdr->ar_op != htons(ARPOP_REPLY))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/* ARP offload test: program the hardware to answer ARP requests for
 * @ip_addr, then inject a broadcast ARP request from a fake station and
 * expect stmmac_test_arp_validate() to see the hardware-generated reply.
 * Promiscuous mode is enabled so the looped-back reply reaches the stack.
 */
static int stmmac_test_arpoffload(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;
	u32 ip_src = 0xdeadbeef;
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret)
		goto cleanup;

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	ret = dev_direct_xmit(skb, 0);
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loop back a maximally-sized frame on @queue: the packet is padded up to
 * the RX buffer size minus FCS, so it exercises the largest single-buffer
 * receive path.
 */
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_packet_attrs attr = { };
	int size = priv->dma_buf_sz;

	attr.dst = priv->dev->dev_addr;
	attr.max_size = size - ETH_FCS_LEN;
	attr.queue_mapping = queue;

	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}

/* Jumbo test across every TX queue; only meaningful with more than one */
static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}

/* Split Header test: loop back one UDP and one TCP packet and require the
 * rx_split_hdr_pkt_n counter to advance for each, proving the header/payload
 * split actually happened on receive.
 */
static int stmmac_test_sph(struct stmmac_priv *priv)
{
	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->sph)
		return -EOPNOTSUPP;

	/* Check for UDP first */
	attr.dst = priv->dev->dev_addr;
	attr.tcp = false;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	/* Check for TCP now */
	cnt_start = cnt_end;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = true;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	return 0;
}

static int stmmac_test_tbs(struct stmmac_priv *priv)
{
#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms, in ns */
	struct stmmac_packet_attrs attr = { };
	struct tc_etf_qopt_offload qopt;
	u64 start_time, curr_time = 0;
	unsigned long flags;
	int ret, i;

	if (!priv->hwts_tx_en)
		return -EOPNOTSUPP;

	/* Find first TBS enabled Queue, if any */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++)
		if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
			break;

	if (i >= priv->plat->tx_queues_to_use)
		return -EOPNOTSUPP;

	qopt.enable = true;
	qopt.queue = i;

	ret = stmmac_tc_setup_etf(priv, priv, &qopt);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->ptp_lock, flags);
	stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	if (!curr_time) {
		/* PTP clock not running: launch time cannot be tested */
		ret = -EOPNOTSUPP;
		goto fail_disable;
	}

	/* Launch the packet STMMAC_TBS_LT_OFFSET ns into the future */
	start_time = curr_time;
	curr_time += STMMAC_TBS_LT_OFFSET;

	attr.dst = priv->dev->dev_addr;
	attr.timestamp = curr_time;
	attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
	attr.queue_mapping = i;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto fail_disable;

	/* Check if expected time has elapsed */
	spin_lock_irqsave(&priv->ptp_lock, flags);
	stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
		ret = -EINVAL;

fail_disable:
	qopt.enable = false;
	stmmac_tc_setup_etf(priv, priv, &qopt);
	return ret;
}

/* Loopback mode each test requires before it runs */
#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];	/* reported via ethtool strings */
	int lb;				/* required loopback mode */
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "VLAN Filtering (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt_perfect,
	}, {
		.name = "Double VLAN Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Double VLAN Filter (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt_perfect,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	}, {
		.name = "TBS (ETF Scheduler) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_tbs,
	},
};

/* ethtool self-test entry point: runs every test in stmmac_selftests[],
 * setting up (and tearing down) the loopback mode each test declares.
 * Results land in @buf (one slot per test); ETH_TEST_FL_FAILED is set on
 * any real failure (-EOPNOTSUPP means "skipped", not "failed").
 */
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!netif_carrier_ok(dev)) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* Wait for queues drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			/* Fall back to MAC loopback if PHY loopback failed */
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loopback. If any of
		 * them is not supported we abort earlier.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		/* Undo whatever loopback mode was set up above */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}
}

/* Fill @data with the numbered test names for ETH_SS_TEST strings */
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}