// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/bitrev.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

/*
 * Trailer appended after the transport header of every generated test
 * packet; the magic and per-packet id let the validators recognize our
 * own looped-back frames.
 */
struct stmmachdr {
	__be32 version;
	__be64 magic;
	u8 id;
} __packed;

/* Headers-only size of a test packet (eth + IPv4 + trailer, no L4). */
#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
/* How long a test waits for the looped-back packet before failing. */
#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)

/*
 * Knobs describing the packet a test wants generated and how the
 * loopback run should behave (addresses, VLAN tags, L4 protocol/ports,
 * padding sizes, wait semantics, TX queue).
 */
struct stmmac_packet_attrs {
	int vlan;		/* 0 = none, 1 = single tag, >1 = double tag */
	int vlan_id_in;		/* inner VLAN id (double-tag case) */
	int vlan_id_out;	/* outer VLAN id */
	unsigned char *src;	/* source MAC, NULL = zeroed */
	unsigned char *dst;	/* destination MAC, NULL = zeroed */
	u32 ip_src;
	u32 ip_dst;
	int tcp;		/* non-zero = TCP payload, else UDP */
	int sport;
	int dport;
	u32 exp_hash;		/* expect a non-zero skb->hash on RX (RSS) */
	int dont_wait;		/* fire-and-forget: don't register a handler */
	int timeout;		/* jiffies; 0 means STMMAC_LB_TIMEOUT */
	int size;		/* extra payload bytes after the trailer */
	int max_size;		/* if set, pad frame up to this total size */
	int remove_sa;		/* build the frame without a source MAC field */
	u8 id;			/* filled in with the generated packet's id */
	int sarc;		/* validate HW source-address replacement */
	u16 queue_mapping;	/* TX queue to send on */
};

/* Monotonic id stamped into each generated packet's trailer. */
static u8 stmmac_test_next_id;

/*
 * Build a loopback test frame: Ethernet (optionally VLAN/QinQ tagged,
 * optionally with the source-address field stripped), IPv4, UDP or TCP,
 * followed by the stmmachdr trailer and optional padding.  Checksum
 * offload fields are set up (CHECKSUM_PARTIAL).  Returns NULL on
 * allocation failure.  Side effect: records the packet id in attr->id
 * and bumps stmmac_test_next_id.
 */
static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	/* Total frame size: payload + headers (+4 per VLAN tag). */
	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;
		if (attr->vlan > 1)
			size += 4;
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (attr->max_size && (attr->max_size > size))
		size = attr->max_size;

	skb = netdev_alloc_skb_ip_align(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);

	/* remove_sa drops the 6-byte source MAC from the header layout. */
	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/*
		 * HACK: without the source MAC the header is 6 bytes
		 * shorter, so h_proto sits at byte offset 6 == ptr[3].
		 */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		/* Tag/ethertype offsets shift when the SA is stripped. */
		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			/* QinQ: outer 802.1AD tag, inner 802.1Q tag. */
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		if (attr->max_size)
			uhdr->len = htons(attr->max_size -
					  (sizeof(*ihdr) + sizeof(*ehdr)));
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	if (attr->max_size)
		iplen = attr->max_size - sizeof(*ehdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	/* Trailer used by the RX validators to match this exact packet. */
	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);
	if (attr->max_size && (attr->max_size > skb->len))
		skb_put(skb, attr->max_size - skb->len);

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/* Build a simple ARP request frame with the given MAC/IP addresses. */
static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 ip_src = htonl(attr->ip_src);
	__be32 ip_dst = htonl(attr->ip_dst);
	struct sk_buff *skb = NULL;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/*
 * Per-test state shared between the sender and the packet_type RX
 * handler: the expected packet attributes, the registered handler, and
 * a completion the handler signals when a matching packet arrives.
 */
struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;
	struct packet_type pt;
	struct completion comp;
	int double_vlan;
	int vlan_id;
	int ok;
};

/*
 * packet_type handler: checks a received skb against the attributes of
 * the packet we transmitted and completes the waiter on a full match.
 * (Continues below; signature split by extraction.)
 */
static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct
					 net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	/* Private copy so the header checks below can't race other users. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (tpriv->packet->dst) {
		if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		/* SARC tests: HW must have replaced SA with our own DA. */
		if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (tpriv->packet->src) {
		if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	/* Finally match our trailer: magic, RSS hash expectation, id. */
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/*
 * Core loopback helper: register an RX handler (unless dont_wait),
 * transmit one generated UDP test packet on attr->queue_mapping, then
 * wait for the handler to see it come back.  Returns 0 on success,
 * -ETIMEDOUT if the packet never returned, or a TX/alloc error.
 */
static int __stmmac_test_loopback(struct stmmac_priv *priv,
				  struct stmmac_packet_attrs *attr)
{
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, attr->queue_mapping);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Plain loopback to our own MAC address. */
static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

/*
 * Same loopback but through the PHY: enable PHY loopback mode around
 * the test.  Returns -EBUSY if no PHY is attached.
 */
static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	/* Best effort: always undo loopback, keep the test's result. */
	phy_loopback(priv->dev->phydev, false);
	return ret;
}

/*
 * Validate MMC (RMON) counters: run one loopback and check that the
 * TX good-frame counter advanced.  (Continues below.)
 */
static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret =
	    stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

/*
 * Validate EEE/LPI: after a loopback with an otherwise idle line, the
 * TX path must both enter and exit LPI mode (checked via the enter/exit
 * interrupt counters in xstats).  Polls up to ~1s for LPI entry.
 */
static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* We have no traffic in the line so, sooner or later it will go LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

/*
 * Filter tests are meaningless in promiscuous mode (everything is
 * accepted anyway); refuse to run them in that case.
 */
static int stmmac_filter_check(struct stmmac_priv *priv)
{
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

/*
 * Return true if @addr's HW hash-filter bin does NOT collide with any
 * multicast address already programmed on the device.  Mirrors the
 * CRC32/bit-reverse hashing used by the MAC's hash filter.
 */
static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
{
	int mc_offset = 32 - priv->hw->mcast_bits_log2;
	struct netdev_hw_addr *ha;
	u32 hash, hash_nr;

	/* First compute the hash for desired addr */
	hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
	hash_nr = hash >> 5;
	hash = 1 << (hash & 0x1f);

	/* Now, check if it collides with any existing one */
	netdev_for_each_mc_addr(ha, priv->dev) {
		u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
		if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/*
 * Return true if @addr does not exactly match any unicast address
 * already programmed (perfect filtering has no hash collisions, only
 * exact matches).
 */
static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
{
	struct netdev_hw_addr *ha;

	/* Check if it collides with any existing one */
	netdev_for_each_uc_addr(ha, priv->dev) {
		if (!memcmp(ha->addr, addr, ETH_ALEN))
			return false;
	}

	/* No collisions, address is good to go */
	return true;
}

/*
 * Hash-filter test: add a multicast address (gd_addr), then verify a
 * packet to it IS received while a packet to a non-programmed,
 * collision-free address (bd_addr) is NOT.  (Continues below.)
 */
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
	unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	/* A timeout here is the PASS condition: the filter dropped it. */
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

/*
 * Perfect (unicast) filter test: program gd_addr, verify it is
 * received, and verify a non-programmed, non-matching bd_addr is
 * dropped.
 */
static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the bd_addr for collisions */
		bd_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, bd_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	/* Timeout expected: filter must drop the unknown address. */
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

/*
 * Mixed filter test: with only a unicast entry programmed, a unicast
 * packet must pass and a multicast one must be dropped.
 */
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the mc_addr for collisions */
		mc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_hash_check(priv, mc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		return ret;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	return ret;
}

/*
 * Mirror of mcfilt: with only a multicast entry programmed, a
 * multicast packet must pass and a unicast one must be dropped.
 * (Continues below.)
 */
static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	int ret, tries = 256;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
		return -EOPNOTSUPP;
	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	while (--tries) {
		/* We only need to check the uc_addr for collisions */
		uc_addr[ETH_ALEN - 1] = tries;
		if (stmmac_perfect_check(priv, uc_addr))
			break;
	}

	if (!tries)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		return ret;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ?
		  0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	return ret;
}

/*
 * RX handler for the flow-control test: accept any PAUSE frame whose
 * source MAC is our own device address.
 */
static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/*
 * Flow-control test: stop the RX DMA queues, flood the (looped-back)
 * link until the RX FIFO fills, and expect the MAC to emit a PAUSE
 * frame, which we catch with the handler above after restarting RX.
 */
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || (!phydev->pause && !phydev->asym_pause))
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	/* RX stopped so incoming frames pile up in the FIFO. */
	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	/* PAUSE frames go to a reserved multicast address: accept them. */
	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait = true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	/* Restart RX and kick NAPI so queued frames get processed. */
	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];
		u32 tail;

		tail = priv->rx_queue[i].dma_rx_phy +
			(DMA_RX_SIZE * sizeof(struct dma_desc));

		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
		stmmac_start_rx(priv, priv->ioaddr, i);

		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/*
 * RSS test: send a UDP packet with distinct ports and require that the
 * returned skb carries a non-zero RX hash (checked via exp_hash in the
 * loopback validator).
 */
static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

/*
 * RX handler shared by the VLAN filter and VLAN offload tests: matches
 * our UDP test packet and, when vlan_id is set, also checks the
 * HW-extracted VLAN tag (proto + tci).  (Continues below.)
 */
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ?
		ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		/*
		 * NOTE(review): vlan_tci is compared without masking off
		 * PCP/DEI bits (VLAN_VID_MASK) — relies on the test tags
		 * being built with zero priority; confirm if reused.
		 */
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id)
			goto out;
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

/*
 * VLAN RX filter test: register VID 0x123, then send four packets with
 * VIDs 0x123..0x126.  Only the first (registered) one may be received;
 * the others must be dropped by the filter.
 */
static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		/* i == 0: must be received; i > 0: must time out. */
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* VLAN filter test using the HW hash filter (needs vlhash). */
static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_vlanfilt(priv);
}

/*
 * Same test with vlhash temporarily cleared so the driver falls back
 * to perfect VLAN filtering.
 */
static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_vlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

/*
 * Double-VLAN (802.1AD outer tag) variant of the VLAN filter test.
 * (Continues below.)
 */
static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		/* attr.vlan = 2 builds a QinQ (double-tagged) frame. */
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		/* Only the registered VID (i == 0) may be received. */
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Double-VLAN filter test using the HW hash filter (needs vlhash). */
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	return __stmmac_test_dvlanfilt(priv);
}

/* Same test with vlhash cleared to exercise perfect filtering. */
static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
{
	int ret, prev_cap = priv->dma_cap.vlhash;

	priv->dma_cap.vlhash = 0;
	ret = __stmmac_test_dvlanfilt(priv);
	priv->dma_cap.vlhash = prev_cap;

	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * Flexible RX Parser test: install a cls_u32 drop rule matching
 * 0xdeadbeef at offset 6 (our source MAC prefix), then check that a
 * packet with that source is NOT received.  (Continues below.)
 */
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	/* sel carries nk trailing tc_u32_key entries. */
	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;
	}

	sel->nkeys = nk;
	sel->offshift = 0;
	/* Match the 4 bytes at offset 6: start of the source MAC. */
	sel->keys[0].off = 6;
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ?
		  0 : -EINVAL; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
/* Stub when tc classification/actions are not built in. */
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Descriptor-based Source Address Insertion: sarc_type 0x1, send a
 * frame with the SA field stripped and expect HW to insert it (the
 * validator checks SA == DA since we loop to our own address).
 */
static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/*
 * Descriptor-based Source Address Replacement: sarc_type 0x2, send
 * with a zeroed SA and expect HW to replace it with our MAC.
 */
static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/*
 * Register-based Source Address Insertion: configure SARC mode 0x2 in
 * the MAC registers instead of per-descriptor.
 */
static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	/* Always restore normal (no SARC) operation. */
	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* Register-based Source Address Replacement (SARC mode 0x3). */
static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/*
 * VLAN TX offload test: hand the stack an untagged payload with a
 * hwaccel VLAN tag (C-VLAN or S-VLAN per @svlan) and verify the tag
 * the HW inserted is seen again on RX.  (Continues below.)
 */
static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ?
		htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	/* Tag goes in skb metadata; HW must insert it into the frame. */
	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto vlan_del;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* C-VLAN (802.1Q) insertion offload test. */
static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

/* S-VLAN (802.1AD) insertion offload test; needs double-VLAN support. */
static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * L3 filter test: build a flow_cls DROP rule on IPv4 src/dst address,
 * verify the packet passes before the rule is installed and is dropped
 * after.  RSS is temporarily disabled so the filter decides the fate.
 */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	/* key/mask live on the stack; the rule only borrows them here. */
	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	key.src = htonl(src);
	key.dst = htonl(dst);
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	/* Re-enable RSS if we turned it off above. */
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
/* Stub when tc classification/actions are not built in. */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif

/* L3 filter on destination address only. */
static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
}

/* L3 filter on source address only. */
static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * L4 filter test: like __stmmac_test_l3filt but the drop rule matches
 * TCP/UDP ports (basic + ports dissector keys).
 */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 <<
FLOW_DISSECTOR_KEY_BASIC); 1465 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); 1466 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; 1467 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); 1468 1469 cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1470 if (!cls) { 1471 ret = -ENOMEM; 1472 goto cleanup_dissector; 1473 } 1474 1475 cls->common.chain_index = 0; 1476 cls->command = FLOW_CLS_REPLACE; 1477 cls->cookie = dummy_cookie; 1478 1479 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1480 if (!rule) { 1481 ret = -ENOMEM; 1482 goto cleanup_cls; 1483 } 1484 1485 rule->match.dissector = dissector; 1486 rule->match.key = (void *)&keys; 1487 rule->match.mask = (void *)&masks; 1488 1489 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP; 1490 keys.key.src = htons(src); 1491 keys.key.dst = htons(dst); 1492 masks.mask.src = src_mask; 1493 masks.mask.dst = dst_mask; 1494 1495 cls->rule = rule; 1496 1497 rule->action.entries[0].id = FLOW_ACTION_DROP; 1498 rule->action.num_entries = 1; 1499 1500 attr.dst = priv->dev->dev_addr; 1501 attr.tcp = !udp; 1502 attr.sport = src; 1503 attr.dport = dst; 1504 attr.ip_dst = 0; 1505 1506 /* Shall receive packet */ 1507 ret = __stmmac_test_loopback(priv, &attr); 1508 if (ret) 1509 goto cleanup_rule; 1510 1511 ret = stmmac_tc_setup_cls(priv, priv, cls); 1512 if (ret) 1513 goto cleanup_rule; 1514 1515 /* Shall NOT receive packet */ 1516 ret = __stmmac_test_loopback(priv, &attr); 1517 ret = ret ? 
0 : -EINVAL; 1518 1519 cls->command = FLOW_CLS_DESTROY; 1520 stmmac_tc_setup_cls(priv, priv, cls); 1521 cleanup_rule: 1522 kfree(rule); 1523 cleanup_cls: 1524 kfree(cls); 1525 cleanup_dissector: 1526 kfree(dissector); 1527 cleanup_rss: 1528 if (priv->rss.enable) { 1529 stmmac_rss_configure(priv, priv->hw, &priv->rss, 1530 priv->plat->rx_queues_to_use); 1531 } 1532 1533 return ret; 1534 } 1535 #else 1536 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1537 u32 dst_mask, u32 src_mask, bool udp) 1538 { 1539 return -EOPNOTSUPP; 1540 } 1541 #endif 1542 1543 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv) 1544 { 1545 u16 dummy_port = 0x123; 1546 1547 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false); 1548 } 1549 1550 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv) 1551 { 1552 u16 dummy_port = 0x123; 1553 1554 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false); 1555 } 1556 1557 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv) 1558 { 1559 u16 dummy_port = 0x123; 1560 1561 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true); 1562 } 1563 1564 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv) 1565 { 1566 u16 dummy_port = 0x123; 1567 1568 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true); 1569 } 1570 1571 static int stmmac_test_arp_validate(struct sk_buff *skb, 1572 struct net_device *ndev, 1573 struct packet_type *pt, 1574 struct net_device *orig_ndev) 1575 { 1576 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 1577 struct ethhdr *ehdr; 1578 struct arphdr *ahdr; 1579 1580 ehdr = (struct ethhdr *)skb_mac_header(skb); 1581 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) 1582 goto out; 1583 1584 ahdr = arp_hdr(skb); 1585 if (ahdr->ar_op != htons(ARPOP_REPLY)) 1586 goto out; 1587 1588 tpriv->ok = true; 1589 complete(&tpriv->comp); 1590 out: 1591 kfree_skb(skb); 1592 return 0; 1593 } 1594 1595 static int stmmac_test_arpoffload(struct 
stmmac_priv *priv) 1596 { 1597 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}; 1598 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1599 struct stmmac_packet_attrs attr = { }; 1600 struct stmmac_test_priv *tpriv; 1601 struct sk_buff *skb = NULL; 1602 u32 ip_addr = 0xdeadcafe; 1603 u32 ip_src = 0xdeadbeef; 1604 int ret; 1605 1606 if (!priv->dma_cap.arpoffsel) 1607 return -EOPNOTSUPP; 1608 1609 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 1610 if (!tpriv) 1611 return -ENOMEM; 1612 1613 tpriv->ok = false; 1614 init_completion(&tpriv->comp); 1615 1616 tpriv->pt.type = htons(ETH_P_ARP); 1617 tpriv->pt.func = stmmac_test_arp_validate; 1618 tpriv->pt.dev = priv->dev; 1619 tpriv->pt.af_packet_priv = tpriv; 1620 tpriv->packet = &attr; 1621 dev_add_pack(&tpriv->pt); 1622 1623 attr.src = src; 1624 attr.ip_src = ip_src; 1625 attr.dst = dst; 1626 attr.ip_dst = ip_addr; 1627 1628 skb = stmmac_test_get_arp_skb(priv, &attr); 1629 if (!skb) { 1630 ret = -ENOMEM; 1631 goto cleanup; 1632 } 1633 1634 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); 1635 if (ret) 1636 goto cleanup; 1637 1638 ret = dev_set_promiscuity(priv->dev, 1); 1639 if (ret) 1640 goto cleanup; 1641 1642 skb_set_queue_mapping(skb, 0); 1643 ret = dev_queue_xmit(skb); 1644 if (ret) 1645 goto cleanup_promisc; 1646 1647 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 1648 ret = tpriv->ok ? 
0 : -ETIMEDOUT; 1649 1650 cleanup_promisc: 1651 dev_set_promiscuity(priv->dev, -1); 1652 cleanup: 1653 stmmac_set_arp_offload(priv, priv->hw, false, 0x0); 1654 dev_remove_pack(&tpriv->pt); 1655 kfree(tpriv); 1656 return ret; 1657 } 1658 1659 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) 1660 { 1661 struct stmmac_packet_attrs attr = { }; 1662 int size = priv->dma_buf_sz; 1663 1664 attr.dst = priv->dev->dev_addr; 1665 attr.max_size = size - ETH_FCS_LEN; 1666 attr.queue_mapping = queue; 1667 1668 return __stmmac_test_loopback(priv, &attr); 1669 } 1670 1671 static int stmmac_test_jumbo(struct stmmac_priv *priv) 1672 { 1673 return __stmmac_test_jumbo(priv, 0); 1674 } 1675 1676 static int stmmac_test_mjumbo(struct stmmac_priv *priv) 1677 { 1678 u32 chan, tx_cnt = priv->plat->tx_queues_to_use; 1679 int ret; 1680 1681 if (tx_cnt <= 1) 1682 return -EOPNOTSUPP; 1683 1684 for (chan = 0; chan < tx_cnt; chan++) { 1685 ret = __stmmac_test_jumbo(priv, chan); 1686 if (ret) 1687 return ret; 1688 } 1689 1690 return 0; 1691 } 1692 1693 static int stmmac_test_sph(struct stmmac_priv *priv) 1694 { 1695 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n; 1696 struct stmmac_packet_attrs attr = { }; 1697 int ret; 1698 1699 if (!priv->sph) 1700 return -EOPNOTSUPP; 1701 1702 /* Check for UDP first */ 1703 attr.dst = priv->dev->dev_addr; 1704 attr.tcp = false; 1705 1706 ret = __stmmac_test_loopback(priv, &attr); 1707 if (ret) 1708 return ret; 1709 1710 cnt_end = priv->xstats.rx_split_hdr_pkt_n; 1711 if (cnt_end <= cnt_start) 1712 return -EINVAL; 1713 1714 /* Check for TCP now */ 1715 cnt_start = cnt_end; 1716 1717 attr.dst = priv->dev->dev_addr; 1718 attr.tcp = true; 1719 1720 ret = __stmmac_test_loopback(priv, &attr); 1721 if (ret) 1722 return ret; 1723 1724 cnt_end = priv->xstats.rx_split_hdr_pkt_n; 1725 if (cnt_end <= cnt_start) 1726 return -EINVAL; 1727 1728 return 0; 1729 } 1730 1731 #define STMMAC_LOOPBACK_NONE 0 1732 #define STMMAC_LOOPBACK_MAC 1 
#define STMMAC_LOOPBACK_PHY	2

/* Table of all selftests: display name, required loopback mode and the
 * test function. Order matters: stmmac_selftest_run() aborts the run if
 * the first (loopback) tests report an unsupported loopback.
 */
static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];	/* name reported via ethtool strings */
	int lb;				/* STMMAC_LOOPBACK_* mode the test needs */
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "VLAN Filtering (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt_perfect,
	}, {
		.name = "Double VLAN Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Double VLAN Filter (perf) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt_perfect,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	},
};

/* ethtool self-test entry point (ETHTOOL_TEST). Runs every test in
 * stmmac_selftests[], setting up / tearing down the loopback mode each
 * test asks for, and stores each test's result into buf[]. A failure
 * (other than -EOPNOTSUPP) sets ETH_TEST_FL_FAILED in etest->flags.
 */
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for queues drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		/* Enter the loopback mode this test requires. PHY loopback
		 * falls back to MAC loopback when no PHY is attached or the
		 * PHY call fails.
		 */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loopback. If any of
		 * them is not supported we abort earlier.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		/* -EOPNOTSUPP is reported in buf[] but is not a failure */
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		/* Leave the loopback mode again, mirroring the setup above */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

/* Fill data with the ETH_GSTRING_LEN-sized, numbered test names (ETHTOOL_GSTRINGS) */
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

/* Number of selftests reported to ethtool */
int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}