// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/completion.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

struct stmmachdr {
	__be32 version;
	__be64 magic;
	u8 id;
} __packed;

#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)

struct stmmac_packet_attrs {
	int vlan;
	int vlan_id_in;
	int vlan_id_out;
	unsigned char *src;
	unsigned char *dst;
	u32 ip_src;
	u32 ip_dst;
	int tcp;
	int sport;
	int dport;
	u32 exp_hash;
	int dont_wait;
	int timeout;
	int size;
	int remove_sa;
	u8 id;
	int sarc;
};

static u8 stmmac_test_next_id;

static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;
		if (attr->vlan > 1)
			size += 4;
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	skb = netdev_alloc_skb(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);
	skb_reserve(skb, NET_IP_ALIGN);

	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/* HACK: with the source address removed, h_proto sits at byte offset 6 */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		uhdr->check = 0;
	}

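	/* Build the IPv4 header; the checksum is filled in by ip_send_check() below */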
	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);
	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = 0;
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);

	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;
	struct packet_type pt;
	struct completion comp;
	int double_vlan;
	int vlan_id;
	int ok;
};

static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (tpriv->packet->dst) {
		if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (tpriv->packet->src) {
		if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

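/*
 * Send a single test packet and, unless attr->dont_wait is set, register a
 * packet_type handler and wait for the packet to come back through the
 * loopback path and pass validation.
 */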
static int __stmmac_test_loopback(struct stmmac_priv *priv,
				  struct stmmac_packet_attrs *attr)
{
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = !tpriv->ok;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	phy_loopback(priv->dev->phydev, false);
	return ret;
}

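/* Check that the MMC TX frame counter increments across a MAC loopback run */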
static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* We have no traffic on the line so, sooner or later, it will enter LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

static int stmmac_filter_check(struct stmmac_priv *priv)
{
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
	unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
	struct stmmac_packet_attrs attr = { };
	int ret;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = !ret;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = !ret;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
{
	return 0;
}

static void stmmac_test_set_rx_mode(struct net_device *netdev)
{
	/* As we are in ethtool test mode we already hold the rtnl lock, so no
	 * address will be changed from userspace. We can just call the
	 * ndo_set_rx_mode() callback directly.
	 */
	if (netdev->netdev_ops->ndo_set_rx_mode)
		netdev->netdev_ops->ndo_set_rx_mode(netdev);
}

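/*
 * With every multicast address unsynced from the device, a packet sent to a
 * unicast address programmed in the filter must pass and one sent to a
 * multicast address must be dropped.
 */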
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	/* Remove all MC addresses */
	__dev_mc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = !ret;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	__dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	/* Remove all UC addresses */
	__dev_uc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = !ret;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	__dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

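/*
 * Stop the RX queues and push enough packets to fill the RX FIFO so that the
 * MAC emits a PAUSE frame, then restart RX and check that the PAUSE frame is
 * received by the handler above.
 */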
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || !phydev->pause)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait = true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];

		stmmac_start_rx(priv, priv->ioaddr, i);
		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = !tpriv->ok;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

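/*
 * Common RX handler for the VLAN tests: checks the (optional) VLAN tag,
 * destination address, UDP port and test header magic.
 */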
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id)
			goto out;
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = !tpriv->ok;
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -1;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

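/* Same as stmmac_test_vlanfilt() but with an outer 802.1ad (S-VLAN) tag */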
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = !tpriv->ok;
		if (ret && !i) {
			goto vlan_del;
		} else if (!ret && i) {
			ret = -1;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;
	}

	sel->nkeys = nk;
	sel->offshift = 0;
	sel->keys[0].off = 6;
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = !ret; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

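/*
 * Source Address Insertion/Replacement tests: the MAC is told to insert or
 * replace the source MAC address either per-descriptor (priv->sarc_type) or
 * through the MAC register (stmmac_sarc_configure()). The loopback validation
 * then expects the looped-back source address to match the destination
 * address (attr->sarc).
 */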
static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

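/*
 * VLAN TX offload test: the packet is built without a VLAN header and the tag
 * is requested through the hwaccel path, so the MAC must insert it on
 * transmit; the RX handler then checks the tag on the looped-back packet.
 */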
static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto vlan_del;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];
	int lb;
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "Double VLAN Filtering",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	},
};

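/*
 * ethtool -t entry point: only offline tests with a valid link are accepted.
 * The carrier is forced off for the duration of the run so that no other
 * traffic interferes with the loopback packets.
 */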
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for queues to drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loopback. If any of
		 * them is not supported we bail out early.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}