// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/bitrev.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

/* Trailer appended to every generated test frame. The RX-side validators use
 * the magic value plus the per-frame id to recognize their own traffic among
 * whatever else arrives on the interface.
 */
struct stmmachdr {
	__be32 version;
	__be64 magic;
	u8 id;
} __packed;

#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC	0xdeadcafecafedeadULL
#define STMMAC_LB_TIMEOUT	msecs_to_jiffies(200)

/* Describes one generated test frame and how the loopback result shall be
 * validated. Filled by each individual test before calling the senders.
 */
struct stmmac_packet_attrs {
	int vlan;		/* 0 = untagged, 1 = single tag, 2 = double tag */
	int vlan_id_in;		/* inner VLAN ID (vlan == 2 only) */
	int vlan_id_out;	/* outer VLAN ID */
	unsigned char *src;	/* source MAC, NULL = zeroed */
	unsigned char *dst;	/* destination MAC, NULL = zeroed */
	u32 ip_src;
	u32 ip_dst;
	int tcp;		/* TCP when set, UDP otherwise */
	int sport;
	int dport;
	u32 exp_hash;		/* expect a non-zero RX hash (RSS test) */
	int dont_wait;		/* fire-and-forget: do not wait for loopback */
	int timeout;		/* jiffies; 0 means STMMAC_LB_TIMEOUT */
	int size;		/* extra payload bytes after the trailer */
	int max_size;		/* pad frame up to this total size if set */
	int remove_sa;		/* build the frame without a source MAC field */
	u8 id;			/* out: id assigned to the generated frame */
	int sarc;		/* validating HW source-address insert/replace */
	u16 queue_mapping;	/* TX queue to transmit on */
};

/* Monotonically increasing id stamped into each generated frame. */
static u8 stmmac_test_next_id;

/* Build a UDP (or TCP when attr->tcp) test frame according to @attr.
 * Returns a ready-to-transmit skb, or NULL on allocation failure.
 */
static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;
		if (attr->vlan > 1)
			size += 4;
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (attr->max_size && (attr->max_size > size))
		size = attr->max_size;

	skb = netdev_alloc_skb(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);

	/* Reserve room for the L2 header: +4 per VLAN tag, -6 when the
	 * source MAC is omitted (remove_sa).
	 */
	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/* HACK */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		/* Tag offsets shift by 6 bytes when there is no source MAC */
		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		if (attr->max_size)
			uhdr->len = htons(attr->max_size -
					  (sizeof(*ihdr) + sizeof(*ehdr)));
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	if (attr->max_size)
		iplen = attr->max_size - sizeof(*ehdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	/* Stamp the recognizable trailer and record its id in @attr so the
	 * RX validator can match this exact frame.
	 */
	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);
	if (attr->max_size && (attr->max_size > skb->len))
		skb_put(skb, attr->max_size - skb->len);

	/* Let the HW finish the L4 checksum (CHECKSUM_PARTIAL) */
	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/* Build an ARP request frame (used by tests that need non-IP traffic).
 * Returns NULL on allocation failure.
 */
static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 ip_src = htonl(attr->ip_src);
	__be32 ip_dst = htonl(attr->ip_dst);
	struct sk_buff *skb = NULL;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/* Per-test context shared between the sender and the packet_type handler. */
struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;	/* attrs of the sent frame */
	struct packet_type pt;			/* RX hook for validation */
	struct completion comp;			/* signalled on match */
	int double_vlan;			/* expect an S-VLAN outer tag */
	int vlan_id;				/* expected VLAN ID, 0 = none */
	int ok;					/* validation verdict */
};

/* packet_type handler for the loopback tests: checks that a received frame
 * matches the attributes of the frame that was sent and, on a match, sets
 * tpriv->ok and completes the waiter.
 */
static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	unsigned char *src = tpriv->packet->src;
	unsigned char *dst = tpriv->packet->dst;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (dst) {
		if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		/* SA insert/replace test: HW shall have rewritten the source
		 * MAC so that it equals the destination MAC.
		 */
		if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (src) {
		if (!ether_addr_equal_unaligned(ehdr->h_source, src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	/* Only accept our own trailer with the exact id we transmitted */
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/* Transmit one frame described by @attr and, unless attr->dont_wait is set,
 * wait for it to loop back and pass validation. Returns 0 on success,
 * -ETIMEDOUT when the frame never came back, or a negative errno.
 */
static int __stmmac_test_loopback(struct stmmac_priv *priv,
				  struct stmmac_packet_attrs *attr)
{
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	/* Only hook RX when we actually intend to wait for the frame */
	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, attr->queue_mapping);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loop one frame through the MAC back to ourselves. */
static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

/* Same as the MAC loopback but with the PHY put in loopback mode. */
static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	phy_loopback(priv->dev->phydev, false);
	return ret;
}

/* Check that the MMC counters advance when traffic flows. */
static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

/* Check that the TX path enters and exits LPI (EEE) when idle. */
static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	/* Heap-allocated: stmmac_extra_stats is too large for the stack */
	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* We have no traffic in the line so, sooner or later it will go LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

/* Filtering tests are meaningless in promiscuous mode: everything passes. */
static int stmmac_filter_check(struct stmmac_priv *priv)
{
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

/* Return true when @addr's hash-filter bin does not collide with any
 * multicast address already programmed on the device.
 */
static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
{
	int mc_offset = 32 - priv->hw->mcast_bits_log2;
	struct netdev_hw_addr *ha;
	u32 hash, hash_nr;

	/* First compute the hash for desired addr */
	hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
	hash_nr = hash >> 5;
	hash = 1 << (hash & 0x1f);

	/* Now, check if it collides with any existing one */
	netdev_for_each_mc_addr(ha, priv->dev) {
		u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
		if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/* Return true when @addr does not match any perfect-filter (unicast)
 * entry already programmed on the device.
 */
static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
{
	struct netdev_hw_addr *ha;

	/* Check if it collides with any existing one */
	netdev_for_each_uc_addr(ha, priv->dev) {
		if (!memcmp(ha->addr, addr, ETH_ALEN))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/* Hash filter test: a subscribed multicast address shall be received,
 * a non-colliding unsubscribed one shall be dropped.
 */
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
	unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret
= __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

/* Perfect (unicast) filter test: an added unicast address shall be
 * received, a non-matching one shall be dropped.
 */
static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

/* Multicast filtering while a unicast entry is present: unicast frame
 * shall pass, an unsubscribed multicast frame shall be dropped.
 */
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the mc_addr for collisions */
		mc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, mc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		return ret;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	return ret;
}

/* Unicast filtering while a multicast entry is present: multicast frame
 * shall pass, a non-matching unicast frame shall be dropped.
 */
static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the uc_addr for collisions */
		uc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, uc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		return ret;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ?
0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	return ret;
}

/* packet_type handler for the flow-control test: look for a PAUSE frame
 * sourced by our own MAC address.
 */
static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/* Flow-control test: stop the RX DMA, flood the RX FIFO until it fills so
 * the MAC emits a PAUSE frame, then restart RX and wait for that PAUSE
 * frame to be seen.
 */
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || (!phydev->pause && !phydev->asym_pause))
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	/* Subscribe to the PAUSE multicast address so we see the frame */
	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait = true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];
		u32 tail;

		tail = priv->rx_queue[i].dma_rx_phy +
			(DMA_RX_SIZE * sizeof(struct dma_desc));

		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
		stmmac_start_rx(priv, priv->ioaddr, i);

		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* RSS test: a looped-back frame shall carry a non-zero RX hash. */
static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

/* packet_type handler shared by the VLAN filter and VLAN offload tests:
 * validates tag (when vlan_id is set), destination MAC, UDP port and the
 * test trailer of a looped-back frame.
 */
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ?
ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id) {
			/* Means filter did not work. */
			tpriv->ok = false;
			complete(&tpriv->comp);
			goto out;
		}
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

/* C-VLAN filter test: only the subscribed VLAN ID (i == 0) shall make it
 * through; the three adjacent IDs shall be dropped by the filter.
 */
static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* i == 0 is the subscribed ID: it must be received */
			goto vlan_del;
		} else if (!ret && i) {
			/* i > 0 got through the filter: test failed */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* VLAN filter test using the HASH filter. */
static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_vlanfilt(priv);
}

/* Same test forced onto the perfect filter by temporarily hiding the
 * HASH capability bit.
 */
static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		return -EOPNOTSUPP;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_vlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

/* S-VLAN (double tag) variant of the VLAN filter test. */
static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* i == 0 is the subscribed ID: it must be received */
			goto vlan_del;
		} else if (!ret && i) {
			/* i > 0 got through the filter: test failed */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Double-VLAN filter test using the HASH filter. */
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_dvlanfilt(priv);
}

/* Same test forced onto the perfect filter by temporarily hiding the
 * HASH capability bit.
 */
static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
		return -EOPNOTSUPP;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_dvlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
/* Flexible RX Parser test: install a cls_u32 drop rule matching the test
 * frame's source MAC and check the frame is NOT received.
 */
static int stmmac_test_rxp(struct stmmac_priv
*priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	/* sel carries nk trailing tc_u32_key entries */
	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;
	}

	/* Match 0xdeadbeef at offset 6 (the frame's source MAC) and drop */
	sel->nkeys = nk;
	sel->offshift = 0;
	sel->keys[0].off = 6;
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

/* Descriptor-based Source Address Insertion test (sarc_type 0x1).
 * NOTE(review): gated on the vlins capability bit — confirm this is the
 * intended capability for SA insertion on this IP.
 */
static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/* Descriptor-based Source Address Replacement test (sarc_type 0x2). */
static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/* Register-based Source Address Insertion test (mode 0x2). */
static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* Register-based Source Address Replacement test (mode 0x3). */
static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* VLAN TX offload test: send a hwaccel-tagged frame and check the looped
 * back frame carries the expected (C- or S-) tag.
 */
static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ?
htons(ETH_P_8021Q) : htons(ETH_P_IP); 1279 tpriv->pt.func = stmmac_test_vlan_validate; 1280 tpriv->pt.dev = priv->dev; 1281 tpriv->pt.af_packet_priv = tpriv; 1282 tpriv->packet = &attr; 1283 tpriv->vlan_id = 0x123; 1284 dev_add_pack(&tpriv->pt); 1285 1286 ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id); 1287 if (ret) 1288 goto cleanup; 1289 1290 attr.dst = priv->dev->dev_addr; 1291 1292 skb = stmmac_test_get_udp_skb(priv, &attr); 1293 if (!skb) { 1294 ret = -ENOMEM; 1295 goto vlan_del; 1296 } 1297 1298 __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id); 1299 skb->protocol = htons(proto); 1300 1301 skb_set_queue_mapping(skb, 0); 1302 ret = dev_queue_xmit(skb); 1303 if (ret) 1304 goto vlan_del; 1305 1306 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 1307 ret = tpriv->ok ? 0 : -ETIMEDOUT; 1308 1309 vlan_del: 1310 vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id); 1311 cleanup: 1312 dev_remove_pack(&tpriv->pt); 1313 kfree(tpriv); 1314 return ret; 1315 } 1316 1317 static int stmmac_test_vlanoff(struct stmmac_priv *priv) 1318 { 1319 return stmmac_test_vlanoff_common(priv, false); 1320 } 1321 1322 static int stmmac_test_svlanoff(struct stmmac_priv *priv) 1323 { 1324 if (!priv->dma_cap.dvlan) 1325 return -EOPNOTSUPP; 1326 return stmmac_test_vlanoff_common(priv, true); 1327 } 1328 1329 #ifdef CONFIG_NET_CLS_ACT 1330 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, 1331 u32 dst_mask, u32 src_mask) 1332 { 1333 struct flow_dissector_key_ipv4_addrs key, mask; 1334 unsigned long dummy_cookie = 0xdeadbeef; 1335 struct stmmac_packet_attrs attr = { }; 1336 struct flow_dissector *dissector; 1337 struct flow_cls_offload *cls; 1338 int ret, old_enable = 0; 1339 struct flow_rule *rule; 1340 1341 if (!tc_can_offload(priv->dev)) 1342 return -EOPNOTSUPP; 1343 if (!priv->dma_cap.l3l4fnum) 1344 return -EOPNOTSUPP; 1345 if (priv->rss.enable) { 1346 old_enable = priv->rss.enable; 1347 priv->rss.enable = false; 1348 
stmmac_rss_configure(priv, priv->hw, NULL, 1349 priv->plat->rx_queues_to_use); 1350 } 1351 1352 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); 1353 if (!dissector) { 1354 ret = -ENOMEM; 1355 goto cleanup_rss; 1356 } 1357 1358 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS); 1359 dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0; 1360 1361 cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1362 if (!cls) { 1363 ret = -ENOMEM; 1364 goto cleanup_dissector; 1365 } 1366 1367 cls->common.chain_index = 0; 1368 cls->command = FLOW_CLS_REPLACE; 1369 cls->cookie = dummy_cookie; 1370 1371 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1372 if (!rule) { 1373 ret = -ENOMEM; 1374 goto cleanup_cls; 1375 } 1376 1377 rule->match.dissector = dissector; 1378 rule->match.key = (void *)&key; 1379 rule->match.mask = (void *)&mask; 1380 1381 key.src = htonl(src); 1382 key.dst = htonl(dst); 1383 mask.src = src_mask; 1384 mask.dst = dst_mask; 1385 1386 cls->rule = rule; 1387 1388 rule->action.entries[0].id = FLOW_ACTION_DROP; 1389 rule->action.num_entries = 1; 1390 1391 attr.dst = priv->dev->dev_addr; 1392 attr.ip_dst = dst; 1393 attr.ip_src = src; 1394 1395 /* Shall receive packet */ 1396 ret = __stmmac_test_loopback(priv, &attr); 1397 if (ret) 1398 goto cleanup_rule; 1399 1400 ret = stmmac_tc_setup_cls(priv, priv, cls); 1401 if (ret) 1402 goto cleanup_rule; 1403 1404 /* Shall NOT receive packet */ 1405 ret = __stmmac_test_loopback(priv, &attr); 1406 ret = ret ? 
0 : -EINVAL; 1407 1408 cls->command = FLOW_CLS_DESTROY; 1409 stmmac_tc_setup_cls(priv, priv, cls); 1410 cleanup_rule: 1411 kfree(rule); 1412 cleanup_cls: 1413 kfree(cls); 1414 cleanup_dissector: 1415 kfree(dissector); 1416 cleanup_rss: 1417 if (old_enable) { 1418 priv->rss.enable = old_enable; 1419 stmmac_rss_configure(priv, priv->hw, &priv->rss, 1420 priv->plat->rx_queues_to_use); 1421 } 1422 1423 return ret; 1424 } 1425 #else 1426 static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, 1427 u32 dst_mask, u32 src_mask) 1428 { 1429 return -EOPNOTSUPP; 1430 } 1431 #endif 1432 1433 static int stmmac_test_l3filt_da(struct stmmac_priv *priv) 1434 { 1435 u32 addr = 0x10203040; 1436 1437 return __stmmac_test_l3filt(priv, addr, 0, ~0, 0); 1438 } 1439 1440 static int stmmac_test_l3filt_sa(struct stmmac_priv *priv) 1441 { 1442 u32 addr = 0x10203040; 1443 1444 return __stmmac_test_l3filt(priv, 0, addr, 0, ~0); 1445 } 1446 1447 #ifdef CONFIG_NET_CLS_ACT 1448 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1449 u32 dst_mask, u32 src_mask, bool udp) 1450 { 1451 struct { 1452 struct flow_dissector_key_basic bkey; 1453 struct flow_dissector_key_ports key; 1454 } __aligned(BITS_PER_LONG / 8) keys; 1455 struct { 1456 struct flow_dissector_key_basic bmask; 1457 struct flow_dissector_key_ports mask; 1458 } __aligned(BITS_PER_LONG / 8) masks; 1459 unsigned long dummy_cookie = 0xdeadbeef; 1460 struct stmmac_packet_attrs attr = { }; 1461 struct flow_dissector *dissector; 1462 struct flow_cls_offload *cls; 1463 int ret, old_enable = 0; 1464 struct flow_rule *rule; 1465 1466 if (!tc_can_offload(priv->dev)) 1467 return -EOPNOTSUPP; 1468 if (!priv->dma_cap.l3l4fnum) 1469 return -EOPNOTSUPP; 1470 if (priv->rss.enable) { 1471 old_enable = priv->rss.enable; 1472 priv->rss.enable = false; 1473 stmmac_rss_configure(priv, priv->hw, NULL, 1474 priv->plat->rx_queues_to_use); 1475 } 1476 1477 dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); 1478 if 
(!dissector) { 1479 ret = -ENOMEM; 1480 goto cleanup_rss; 1481 } 1482 1483 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC); 1484 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); 1485 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; 1486 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); 1487 1488 cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1489 if (!cls) { 1490 ret = -ENOMEM; 1491 goto cleanup_dissector; 1492 } 1493 1494 cls->common.chain_index = 0; 1495 cls->command = FLOW_CLS_REPLACE; 1496 cls->cookie = dummy_cookie; 1497 1498 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1499 if (!rule) { 1500 ret = -ENOMEM; 1501 goto cleanup_cls; 1502 } 1503 1504 rule->match.dissector = dissector; 1505 rule->match.key = (void *)&keys; 1506 rule->match.mask = (void *)&masks; 1507 1508 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP; 1509 keys.key.src = htons(src); 1510 keys.key.dst = htons(dst); 1511 masks.mask.src = src_mask; 1512 masks.mask.dst = dst_mask; 1513 1514 cls->rule = rule; 1515 1516 rule->action.entries[0].id = FLOW_ACTION_DROP; 1517 rule->action.num_entries = 1; 1518 1519 attr.dst = priv->dev->dev_addr; 1520 attr.tcp = !udp; 1521 attr.sport = src; 1522 attr.dport = dst; 1523 attr.ip_dst = 0; 1524 1525 /* Shall receive packet */ 1526 ret = __stmmac_test_loopback(priv, &attr); 1527 if (ret) 1528 goto cleanup_rule; 1529 1530 ret = stmmac_tc_setup_cls(priv, priv, cls); 1531 if (ret) 1532 goto cleanup_rule; 1533 1534 /* Shall NOT receive packet */ 1535 ret = __stmmac_test_loopback(priv, &attr); 1536 ret = ret ? 
0 : -EINVAL; 1537 1538 cls->command = FLOW_CLS_DESTROY; 1539 stmmac_tc_setup_cls(priv, priv, cls); 1540 cleanup_rule: 1541 kfree(rule); 1542 cleanup_cls: 1543 kfree(cls); 1544 cleanup_dissector: 1545 kfree(dissector); 1546 cleanup_rss: 1547 if (old_enable) { 1548 priv->rss.enable = old_enable; 1549 stmmac_rss_configure(priv, priv->hw, &priv->rss, 1550 priv->plat->rx_queues_to_use); 1551 } 1552 1553 return ret; 1554 } 1555 #else 1556 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1557 u32 dst_mask, u32 src_mask, bool udp) 1558 { 1559 return -EOPNOTSUPP; 1560 } 1561 #endif 1562 1563 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv) 1564 { 1565 u16 dummy_port = 0x123; 1566 1567 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false); 1568 } 1569 1570 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv) 1571 { 1572 u16 dummy_port = 0x123; 1573 1574 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false); 1575 } 1576 1577 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv) 1578 { 1579 u16 dummy_port = 0x123; 1580 1581 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true); 1582 } 1583 1584 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv) 1585 { 1586 u16 dummy_port = 0x123; 1587 1588 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true); 1589 } 1590 1591 static int stmmac_test_arp_validate(struct sk_buff *skb, 1592 struct net_device *ndev, 1593 struct packet_type *pt, 1594 struct net_device *orig_ndev) 1595 { 1596 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 1597 struct ethhdr *ehdr; 1598 struct arphdr *ahdr; 1599 1600 ehdr = (struct ethhdr *)skb_mac_header(skb); 1601 if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) 1602 goto out; 1603 1604 ahdr = arp_hdr(skb); 1605 if (ahdr->ar_op != htons(ARPOP_REPLY)) 1606 goto out; 1607 1608 tpriv->ok = true; 1609 complete(&tpriv->comp); 1610 out: 1611 kfree_skb(skb); 1612 return 0; 1613 } 1614 1615 
/* ARP offload test: program the MAC to answer ARP requests for @ip_addr in
 * hardware, broadcast a request from a fake source MAC/IP, and wait for
 * stmmac_test_arp_validate() to see the HW-generated reply.
 */
static int stmmac_test_arpoffload(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;
	u32 ip_src = 0xdeadbeef;
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret)
		goto cleanup;

	/* Promisc so the looped-back reply (addressed to the fake src MAC)
	 * is not dropped by our own RX filter.
	 */
	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loop back a frame sized to the full RX buffer (minus FCS) on @queue */
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_packet_attrs attr = { };
	int size = priv->dma_buf_sz;

	attr.dst = priv->dev->dev_addr;
	attr.max_size = size - ETH_FCS_LEN;
	attr.queue_mapping = queue;

	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}

/* Jumbo test repeated on every TX queue; only meaningful with >1 queue */
static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}

/* Split Header test: loop back one UDP and one TCP frame and check the
 * rx_split_hdr_pkt_n counter advanced for each, proving SPH was applied.
 */
static int stmmac_test_sph(struct stmmac_priv *priv)
{
	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->sph)
		return -EOPNOTSUPP;

	/* Check for UDP first */
	attr.dst = priv->dev->dev_addr;
	attr.tcp = false;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	/* Check for TCP now */
	cnt_start = cnt_end;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = true;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	return 0;
}

/* Loopback mode each test needs before it can run */
#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

/* Registry of all ethtool self-tests: display name, required loopback
 * mode, and the test entry point.
 */
static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];
	int lb;
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "VLAN Filtering (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt_perfect,
	}, {
		.name = "Double VLAN Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Double VLAN Filter (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt_perfect,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	},
};

/* ethtool self-test entry point: runs every registered test, setting up and
 * tearing down the loopback mode each one requires. Results go into @buf
 * (0, -EOPNOTSUPP, or a negative error); any real failure sets
 * ETH_TEST_FL_FAILED in @etest->flags.
 */
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for queues drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough: no PHY loopback, try MAC loopback */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loopback. If any of
		 * them is not supported we abort earlier.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		/* Undo whichever loopback mode was enabled above */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

/* Fill @data with one ETH_GSTRING_LEN-sized name per test, numbered */
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

/* Number of self-tests exposed through ethtool */
int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}