1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates. 4 * stmmac Selftests Support 5 * 6 * Author: Jose Abreu <joabreu@synopsys.com> 7 */ 8 9 #include <linux/completion.h> 10 #include <linux/ethtool.h> 11 #include <linux/ip.h> 12 #include <linux/phy.h> 13 #include <linux/udp.h> 14 #include <net/pkt_cls.h> 15 #include <net/tcp.h> 16 #include <net/udp.h> 17 #include <net/tc_act/tc_gact.h> 18 #include "stmmac.h" 19 20 struct stmmachdr { 21 __be32 version; 22 __be64 magic; 23 u8 id; 24 } __packed; 25 26 #define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ 27 sizeof(struct stmmachdr)) 28 #define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL 29 #define STMMAC_LB_TIMEOUT msecs_to_jiffies(200) 30 31 struct stmmac_packet_attrs { 32 int vlan; 33 int vlan_id_in; 34 int vlan_id_out; 35 unsigned char *src; 36 unsigned char *dst; 37 u32 ip_src; 38 u32 ip_dst; 39 int tcp; 40 int sport; 41 int dport; 42 u32 exp_hash; 43 int dont_wait; 44 int timeout; 45 int size; 46 int max_size; 47 int remove_sa; 48 u8 id; 49 int sarc; 50 u16 queue_mapping; 51 }; 52 53 static u8 stmmac_test_next_id; 54 55 static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, 56 struct stmmac_packet_attrs *attr) 57 { 58 struct sk_buff *skb = NULL; 59 struct udphdr *uhdr = NULL; 60 struct tcphdr *thdr = NULL; 61 struct stmmachdr *shdr; 62 struct ethhdr *ehdr; 63 struct iphdr *ihdr; 64 int iplen, size; 65 66 size = attr->size + STMMAC_TEST_PKT_SIZE; 67 if (attr->vlan) { 68 size += 4; 69 if (attr->vlan > 1) 70 size += 4; 71 } 72 73 if (attr->tcp) 74 size += sizeof(struct tcphdr); 75 else 76 size += sizeof(struct udphdr); 77 78 if (attr->max_size && (attr->max_size > size)) 79 size = attr->max_size; 80 81 skb = netdev_alloc_skb_ip_align(priv->dev, size); 82 if (!skb) 83 return NULL; 84 85 prefetchw(skb->data); 86 87 if (attr->vlan > 1) 88 ehdr = skb_push(skb, ETH_HLEN + 8); 89 else if (attr->vlan) 90 ehdr = skb_push(skb, 
ETH_HLEN + 4); 91 else if (attr->remove_sa) 92 ehdr = skb_push(skb, ETH_HLEN - 6); 93 else 94 ehdr = skb_push(skb, ETH_HLEN); 95 skb_reset_mac_header(skb); 96 97 skb_set_network_header(skb, skb->len); 98 ihdr = skb_put(skb, sizeof(*ihdr)); 99 100 skb_set_transport_header(skb, skb->len); 101 if (attr->tcp) 102 thdr = skb_put(skb, sizeof(*thdr)); 103 else 104 uhdr = skb_put(skb, sizeof(*uhdr)); 105 106 if (!attr->remove_sa) 107 eth_zero_addr(ehdr->h_source); 108 eth_zero_addr(ehdr->h_dest); 109 if (attr->src && !attr->remove_sa) 110 ether_addr_copy(ehdr->h_source, attr->src); 111 if (attr->dst) 112 ether_addr_copy(ehdr->h_dest, attr->dst); 113 114 if (!attr->remove_sa) { 115 ehdr->h_proto = htons(ETH_P_IP); 116 } else { 117 __be16 *ptr = (__be16 *)ehdr; 118 119 /* HACK */ 120 ptr[3] = htons(ETH_P_IP); 121 } 122 123 if (attr->vlan) { 124 __be16 *tag, *proto; 125 126 if (!attr->remove_sa) { 127 tag = (void *)ehdr + ETH_HLEN; 128 proto = (void *)ehdr + (2 * ETH_ALEN); 129 } else { 130 tag = (void *)ehdr + ETH_HLEN - 6; 131 proto = (void *)ehdr + ETH_ALEN; 132 } 133 134 proto[0] = htons(ETH_P_8021Q); 135 tag[0] = htons(attr->vlan_id_out); 136 tag[1] = htons(ETH_P_IP); 137 if (attr->vlan > 1) { 138 proto[0] = htons(ETH_P_8021AD); 139 tag[1] = htons(ETH_P_8021Q); 140 tag[2] = htons(attr->vlan_id_in); 141 tag[3] = htons(ETH_P_IP); 142 } 143 } 144 145 if (attr->tcp) { 146 thdr->source = htons(attr->sport); 147 thdr->dest = htons(attr->dport); 148 thdr->doff = sizeof(struct tcphdr) / 4; 149 thdr->check = 0; 150 } else { 151 uhdr->source = htons(attr->sport); 152 uhdr->dest = htons(attr->dport); 153 uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size); 154 if (attr->max_size) 155 uhdr->len = htons(attr->max_size - 156 (sizeof(*ihdr) + sizeof(*ehdr))); 157 uhdr->check = 0; 158 } 159 160 ihdr->ihl = 5; 161 ihdr->ttl = 32; 162 ihdr->version = 4; 163 if (attr->tcp) 164 ihdr->protocol = IPPROTO_TCP; 165 else 166 ihdr->protocol = IPPROTO_UDP; 167 iplen = sizeof(*ihdr) + 
sizeof(*shdr) + attr->size; 168 if (attr->tcp) 169 iplen += sizeof(*thdr); 170 else 171 iplen += sizeof(*uhdr); 172 173 if (attr->max_size) 174 iplen = attr->max_size - sizeof(*ehdr); 175 176 ihdr->tot_len = htons(iplen); 177 ihdr->frag_off = 0; 178 ihdr->saddr = htonl(attr->ip_src); 179 ihdr->daddr = htonl(attr->ip_dst); 180 ihdr->tos = 0; 181 ihdr->id = 0; 182 ip_send_check(ihdr); 183 184 shdr = skb_put(skb, sizeof(*shdr)); 185 shdr->version = 0; 186 shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC); 187 attr->id = stmmac_test_next_id; 188 shdr->id = stmmac_test_next_id++; 189 190 if (attr->size) 191 skb_put(skb, attr->size); 192 if (attr->max_size && (attr->max_size > skb->len)) 193 skb_put(skb, attr->max_size - skb->len); 194 195 skb->csum = 0; 196 skb->ip_summed = CHECKSUM_PARTIAL; 197 if (attr->tcp) { 198 thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0); 199 skb->csum_start = skb_transport_header(skb) - skb->head; 200 skb->csum_offset = offsetof(struct tcphdr, check); 201 } else { 202 udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr); 203 } 204 205 skb->protocol = htons(ETH_P_IP); 206 skb->pkt_type = PACKET_HOST; 207 skb->dev = priv->dev; 208 209 return skb; 210 } 211 212 static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv, 213 struct stmmac_packet_attrs *attr) 214 { 215 __be32 ip_src = htonl(attr->ip_src); 216 __be32 ip_dst = htonl(attr->ip_dst); 217 struct sk_buff *skb = NULL; 218 219 skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src, 220 NULL, attr->src, attr->dst); 221 if (!skb) 222 return NULL; 223 224 skb->pkt_type = PACKET_HOST; 225 skb->dev = priv->dev; 226 227 return skb; 228 } 229 230 struct stmmac_test_priv { 231 struct stmmac_packet_attrs *packet; 232 struct packet_type pt; 233 struct completion comp; 234 int double_vlan; 235 int vlan_id; 236 int ok; 237 }; 238 239 static int stmmac_test_loopback_validate(struct sk_buff *skb, 240 struct net_device *ndev, 241 struct packet_type *pt, 242 struct 
net_device *orig_ndev) 243 { 244 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 245 struct stmmachdr *shdr; 246 struct ethhdr *ehdr; 247 struct udphdr *uhdr; 248 struct tcphdr *thdr; 249 struct iphdr *ihdr; 250 251 skb = skb_unshare(skb, GFP_ATOMIC); 252 if (!skb) 253 goto out; 254 255 if (skb_linearize(skb)) 256 goto out; 257 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN)) 258 goto out; 259 260 ehdr = (struct ethhdr *)skb_mac_header(skb); 261 if (tpriv->packet->dst) { 262 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) 263 goto out; 264 } 265 if (tpriv->packet->sarc) { 266 if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) 267 goto out; 268 } else if (tpriv->packet->src) { 269 if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) 270 goto out; 271 } 272 273 ihdr = ip_hdr(skb); 274 if (tpriv->double_vlan) 275 ihdr = (struct iphdr *)(skb_network_header(skb) + 4); 276 277 if (tpriv->packet->tcp) { 278 if (ihdr->protocol != IPPROTO_TCP) 279 goto out; 280 281 thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl); 282 if (thdr->dest != htons(tpriv->packet->dport)) 283 goto out; 284 285 shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr)); 286 } else { 287 if (ihdr->protocol != IPPROTO_UDP) 288 goto out; 289 290 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl); 291 if (uhdr->dest != htons(tpriv->packet->dport)) 292 goto out; 293 294 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr)); 295 } 296 297 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC)) 298 goto out; 299 if (tpriv->packet->exp_hash && !skb->hash) 300 goto out; 301 if (tpriv->packet->id != shdr->id) 302 goto out; 303 304 tpriv->ok = true; 305 complete(&tpriv->comp); 306 out: 307 kfree_skb(skb); 308 return 0; 309 } 310 311 static int __stmmac_test_loopback(struct stmmac_priv *priv, 312 struct stmmac_packet_attrs *attr) 313 { 314 struct stmmac_test_priv *tpriv; 315 struct sk_buff *skb = NULL; 316 int ret = 0; 317 318 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 319 
if (!tpriv) 320 return -ENOMEM; 321 322 tpriv->ok = false; 323 init_completion(&tpriv->comp); 324 325 tpriv->pt.type = htons(ETH_P_IP); 326 tpriv->pt.func = stmmac_test_loopback_validate; 327 tpriv->pt.dev = priv->dev; 328 tpriv->pt.af_packet_priv = tpriv; 329 tpriv->packet = attr; 330 331 if (!attr->dont_wait) 332 dev_add_pack(&tpriv->pt); 333 334 skb = stmmac_test_get_udp_skb(priv, attr); 335 if (!skb) { 336 ret = -ENOMEM; 337 goto cleanup; 338 } 339 340 skb_set_queue_mapping(skb, attr->queue_mapping); 341 ret = dev_queue_xmit(skb); 342 if (ret) 343 goto cleanup; 344 345 if (attr->dont_wait) 346 goto cleanup; 347 348 if (!attr->timeout) 349 attr->timeout = STMMAC_LB_TIMEOUT; 350 351 wait_for_completion_timeout(&tpriv->comp, attr->timeout); 352 ret = tpriv->ok ? 0 : -ETIMEDOUT; 353 354 cleanup: 355 if (!attr->dont_wait) 356 dev_remove_pack(&tpriv->pt); 357 kfree(tpriv); 358 return ret; 359 } 360 361 static int stmmac_test_mac_loopback(struct stmmac_priv *priv) 362 { 363 struct stmmac_packet_attrs attr = { }; 364 365 attr.dst = priv->dev->dev_addr; 366 return __stmmac_test_loopback(priv, &attr); 367 } 368 369 static int stmmac_test_phy_loopback(struct stmmac_priv *priv) 370 { 371 struct stmmac_packet_attrs attr = { }; 372 int ret; 373 374 if (!priv->dev->phydev) 375 return -EBUSY; 376 377 ret = phy_loopback(priv->dev->phydev, true); 378 if (ret) 379 return ret; 380 381 attr.dst = priv->dev->dev_addr; 382 ret = __stmmac_test_loopback(priv, &attr); 383 384 phy_loopback(priv->dev->phydev, false); 385 return ret; 386 } 387 388 static int stmmac_test_mmc(struct stmmac_priv *priv) 389 { 390 struct stmmac_counters initial, final; 391 int ret; 392 393 memset(&initial, 0, sizeof(initial)); 394 memset(&final, 0, sizeof(final)); 395 396 if (!priv->dma_cap.rmon) 397 return -EOPNOTSUPP; 398 399 /* Save previous results into internal struct */ 400 stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc); 401 402 ret = stmmac_test_mac_loopback(priv); 403 if (ret) 404 return ret; 405 406 
/* These will be loopback results so no need to save them */ 407 stmmac_mmc_read(priv, priv->mmcaddr, &final); 408 409 /* 410 * The number of MMC counters available depends on HW configuration 411 * so we just use this one to validate the feature. I hope there is 412 * not a version without this counter. 413 */ 414 if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g) 415 return -EINVAL; 416 417 return 0; 418 } 419 420 static int stmmac_test_eee(struct stmmac_priv *priv) 421 { 422 struct stmmac_extra_stats *initial, *final; 423 int retries = 10; 424 int ret; 425 426 if (!priv->dma_cap.eee || !priv->eee_active) 427 return -EOPNOTSUPP; 428 429 initial = kzalloc(sizeof(*initial), GFP_KERNEL); 430 if (!initial) 431 return -ENOMEM; 432 433 final = kzalloc(sizeof(*final), GFP_KERNEL); 434 if (!final) { 435 ret = -ENOMEM; 436 goto out_free_initial; 437 } 438 439 memcpy(initial, &priv->xstats, sizeof(*initial)); 440 441 ret = stmmac_test_mac_loopback(priv); 442 if (ret) 443 goto out_free_final; 444 445 /* We have no traffic in the line so, sooner or later it will go LPI */ 446 while (--retries) { 447 memcpy(final, &priv->xstats, sizeof(*final)); 448 449 if (final->irq_tx_path_in_lpi_mode_n > 450 initial->irq_tx_path_in_lpi_mode_n) 451 break; 452 msleep(100); 453 } 454 455 if (!retries) { 456 ret = -ETIMEDOUT; 457 goto out_free_final; 458 } 459 460 if (final->irq_tx_path_in_lpi_mode_n <= 461 initial->irq_tx_path_in_lpi_mode_n) { 462 ret = -EINVAL; 463 goto out_free_final; 464 } 465 466 if (final->irq_tx_path_exit_lpi_mode_n <= 467 initial->irq_tx_path_exit_lpi_mode_n) { 468 ret = -EINVAL; 469 goto out_free_final; 470 } 471 472 out_free_final: 473 kfree(final); 474 out_free_initial: 475 kfree(initial); 476 return ret; 477 } 478 479 static int stmmac_filter_check(struct stmmac_priv *priv) 480 { 481 if (!(priv->dev->flags & IFF_PROMISC)) 482 return 0; 483 484 netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n"); 485 return -EOPNOTSUPP; 486 } 487 488 
static int stmmac_test_hfilt(struct stmmac_priv *priv) 489 { 490 unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd}; 491 unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb}; 492 struct stmmac_packet_attrs attr = { }; 493 int ret; 494 495 ret = stmmac_filter_check(priv); 496 if (ret) 497 return ret; 498 499 ret = dev_mc_add(priv->dev, gd_addr); 500 if (ret) 501 return ret; 502 503 attr.dst = gd_addr; 504 505 /* Shall receive packet */ 506 ret = __stmmac_test_loopback(priv, &attr); 507 if (ret) 508 goto cleanup; 509 510 attr.dst = bd_addr; 511 512 /* Shall NOT receive packet */ 513 ret = __stmmac_test_loopback(priv, &attr); 514 ret = ret ? 0 : -EINVAL; 515 516 cleanup: 517 dev_mc_del(priv->dev, gd_addr); 518 return ret; 519 } 520 521 static int stmmac_test_pfilt(struct stmmac_priv *priv) 522 { 523 unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 524 unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55}; 525 struct stmmac_packet_attrs attr = { }; 526 int ret; 527 528 if (stmmac_filter_check(priv)) 529 return -EOPNOTSUPP; 530 531 ret = dev_uc_add(priv->dev, gd_addr); 532 if (ret) 533 return ret; 534 535 attr.dst = gd_addr; 536 537 /* Shall receive packet */ 538 ret = __stmmac_test_loopback(priv, &attr); 539 if (ret) 540 goto cleanup; 541 542 attr.dst = bd_addr; 543 544 /* Shall NOT receive packet */ 545 ret = __stmmac_test_loopback(priv, &attr); 546 ret = ret ? 0 : -EINVAL; 547 548 cleanup: 549 dev_uc_del(priv->dev, gd_addr); 550 return ret; 551 } 552 553 static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr) 554 { 555 return 0; 556 } 557 558 static void stmmac_test_set_rx_mode(struct net_device *netdev) 559 { 560 /* As we are in test mode of ethtool we already own the rtnl lock 561 * so no address will change from user. 
We can just call the 562 * ndo_set_rx_mode() callback directly */ 563 if (netdev->netdev_ops->ndo_set_rx_mode) 564 netdev->netdev_ops->ndo_set_rx_mode(netdev); 565 } 566 567 static int stmmac_test_mcfilt(struct stmmac_priv *priv) 568 { 569 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 570 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 571 struct stmmac_packet_attrs attr = { }; 572 int ret; 573 574 if (stmmac_filter_check(priv)) 575 return -EOPNOTSUPP; 576 577 /* Remove all MC addresses */ 578 __dev_mc_unsync(priv->dev, NULL); 579 stmmac_test_set_rx_mode(priv->dev); 580 581 ret = dev_uc_add(priv->dev, uc_addr); 582 if (ret) 583 goto cleanup; 584 585 attr.dst = uc_addr; 586 587 /* Shall receive packet */ 588 ret = __stmmac_test_loopback(priv, &attr); 589 if (ret) 590 goto cleanup; 591 592 attr.dst = mc_addr; 593 594 /* Shall NOT receive packet */ 595 ret = __stmmac_test_loopback(priv, &attr); 596 ret = ret ? 0 : -EINVAL; 597 598 cleanup: 599 dev_uc_del(priv->dev, uc_addr); 600 __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL); 601 stmmac_test_set_rx_mode(priv->dev); 602 return ret; 603 } 604 605 static int stmmac_test_ucfilt(struct stmmac_priv *priv) 606 { 607 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 608 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 609 struct stmmac_packet_attrs attr = { }; 610 int ret; 611 612 if (stmmac_filter_check(priv)) 613 return -EOPNOTSUPP; 614 615 /* Remove all UC addresses */ 616 __dev_uc_unsync(priv->dev, NULL); 617 stmmac_test_set_rx_mode(priv->dev); 618 619 ret = dev_mc_add(priv->dev, mc_addr); 620 if (ret) 621 goto cleanup; 622 623 attr.dst = mc_addr; 624 625 /* Shall receive packet */ 626 ret = __stmmac_test_loopback(priv, &attr); 627 if (ret) 628 goto cleanup; 629 630 attr.dst = uc_addr; 631 632 /* Shall NOT receive packet */ 633 ret = __stmmac_test_loopback(priv, &attr); 634 ret = ret ? 
0 : -EINVAL; 635 636 cleanup: 637 dev_mc_del(priv->dev, mc_addr); 638 __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL); 639 stmmac_test_set_rx_mode(priv->dev); 640 return ret; 641 } 642 643 static int stmmac_test_flowctrl_validate(struct sk_buff *skb, 644 struct net_device *ndev, 645 struct packet_type *pt, 646 struct net_device *orig_ndev) 647 { 648 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 649 struct ethhdr *ehdr; 650 651 ehdr = (struct ethhdr *)skb_mac_header(skb); 652 if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) 653 goto out; 654 if (ehdr->h_proto != htons(ETH_P_PAUSE)) 655 goto out; 656 657 tpriv->ok = true; 658 complete(&tpriv->comp); 659 out: 660 kfree_skb(skb); 661 return 0; 662 } 663 664 static int stmmac_test_flowctrl(struct stmmac_priv *priv) 665 { 666 unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01}; 667 struct phy_device *phydev = priv->dev->phydev; 668 u32 rx_cnt = priv->plat->rx_queues_to_use; 669 struct stmmac_test_priv *tpriv; 670 unsigned int pkt_count; 671 int i, ret = 0; 672 673 if (!phydev || !phydev->pause) 674 return -EOPNOTSUPP; 675 676 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 677 if (!tpriv) 678 return -ENOMEM; 679 680 tpriv->ok = false; 681 init_completion(&tpriv->comp); 682 tpriv->pt.type = htons(ETH_P_PAUSE); 683 tpriv->pt.func = stmmac_test_flowctrl_validate; 684 tpriv->pt.dev = priv->dev; 685 tpriv->pt.af_packet_priv = tpriv; 686 dev_add_pack(&tpriv->pt); 687 688 /* Compute minimum number of packets to make FIFO full */ 689 pkt_count = priv->plat->rx_fifo_size; 690 if (!pkt_count) 691 pkt_count = priv->dma_cap.rx_fifo_size; 692 pkt_count /= 1400; 693 pkt_count *= 2; 694 695 for (i = 0; i < rx_cnt; i++) 696 stmmac_stop_rx(priv, priv->ioaddr, i); 697 698 ret = dev_set_promiscuity(priv->dev, 1); 699 if (ret) 700 goto cleanup; 701 702 ret = dev_mc_add(priv->dev, paddr); 703 if (ret) 704 goto cleanup; 705 706 for (i = 0; i < pkt_count; i++) { 707 struct stmmac_packet_attrs attr = { }; 708 
709 attr.dst = priv->dev->dev_addr; 710 attr.dont_wait = true; 711 attr.size = 1400; 712 713 ret = __stmmac_test_loopback(priv, &attr); 714 if (ret) 715 goto cleanup; 716 if (tpriv->ok) 717 break; 718 } 719 720 /* Wait for some time in case RX Watchdog is enabled */ 721 msleep(200); 722 723 for (i = 0; i < rx_cnt; i++) { 724 struct stmmac_channel *ch = &priv->channel[i]; 725 u32 tail; 726 727 tail = priv->rx_queue[i].dma_rx_phy + 728 (DMA_RX_SIZE * sizeof(struct dma_desc)); 729 730 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); 731 stmmac_start_rx(priv, priv->ioaddr, i); 732 733 local_bh_disable(); 734 napi_reschedule(&ch->rx_napi); 735 local_bh_enable(); 736 } 737 738 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 739 ret = tpriv->ok ? 0 : -ETIMEDOUT; 740 741 cleanup: 742 dev_mc_del(priv->dev, paddr); 743 dev_set_promiscuity(priv->dev, -1); 744 dev_remove_pack(&tpriv->pt); 745 kfree(tpriv); 746 return ret; 747 } 748 749 static int stmmac_test_rss(struct stmmac_priv *priv) 750 { 751 struct stmmac_packet_attrs attr = { }; 752 753 if (!priv->dma_cap.rssen || !priv->rss.enable) 754 return -EOPNOTSUPP; 755 756 attr.dst = priv->dev->dev_addr; 757 attr.exp_hash = true; 758 attr.sport = 0x321; 759 attr.dport = 0x123; 760 761 return __stmmac_test_loopback(priv, &attr); 762 } 763 764 static int stmmac_test_vlan_validate(struct sk_buff *skb, 765 struct net_device *ndev, 766 struct packet_type *pt, 767 struct net_device *orig_ndev) 768 { 769 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 770 struct stmmachdr *shdr; 771 struct ethhdr *ehdr; 772 struct udphdr *uhdr; 773 struct iphdr *ihdr; 774 u16 proto; 775 776 proto = tpriv->double_vlan ? 
ETH_P_8021AD : ETH_P_8021Q; 777 778 skb = skb_unshare(skb, GFP_ATOMIC); 779 if (!skb) 780 goto out; 781 782 if (skb_linearize(skb)) 783 goto out; 784 if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN)) 785 goto out; 786 if (tpriv->vlan_id) { 787 if (skb->vlan_proto != htons(proto)) 788 goto out; 789 if (skb->vlan_tci != tpriv->vlan_id) 790 goto out; 791 } 792 793 ehdr = (struct ethhdr *)skb_mac_header(skb); 794 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) 795 goto out; 796 797 ihdr = ip_hdr(skb); 798 if (tpriv->double_vlan) 799 ihdr = (struct iphdr *)(skb_network_header(skb) + 4); 800 if (ihdr->protocol != IPPROTO_UDP) 801 goto out; 802 803 uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl); 804 if (uhdr->dest != htons(tpriv->packet->dport)) 805 goto out; 806 807 shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr)); 808 if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC)) 809 goto out; 810 811 tpriv->ok = true; 812 complete(&tpriv->comp); 813 814 out: 815 kfree_skb(skb); 816 return 0; 817 } 818 819 static int stmmac_test_vlanfilt(struct stmmac_priv *priv) 820 { 821 struct stmmac_packet_attrs attr = { }; 822 struct stmmac_test_priv *tpriv; 823 struct sk_buff *skb = NULL; 824 int ret = 0, i; 825 826 if (!priv->dma_cap.vlhash) 827 return -EOPNOTSUPP; 828 829 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 830 if (!tpriv) 831 return -ENOMEM; 832 833 tpriv->ok = false; 834 init_completion(&tpriv->comp); 835 836 tpriv->pt.type = htons(ETH_P_IP); 837 tpriv->pt.func = stmmac_test_vlan_validate; 838 tpriv->pt.dev = priv->dev; 839 tpriv->pt.af_packet_priv = tpriv; 840 tpriv->packet = &attr; 841 842 /* 843 * As we use HASH filtering, false positives may appear. This is a 844 * specially chosen ID so that adjacent IDs (+4) have different 845 * HASH values. 
846 */ 847 tpriv->vlan_id = 0x123; 848 dev_add_pack(&tpriv->pt); 849 850 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id); 851 if (ret) 852 goto cleanup; 853 854 for (i = 0; i < 4; i++) { 855 attr.vlan = 1; 856 attr.vlan_id_out = tpriv->vlan_id + i; 857 attr.dst = priv->dev->dev_addr; 858 attr.sport = 9; 859 attr.dport = 9; 860 861 skb = stmmac_test_get_udp_skb(priv, &attr); 862 if (!skb) { 863 ret = -ENOMEM; 864 goto vlan_del; 865 } 866 867 skb_set_queue_mapping(skb, 0); 868 ret = dev_queue_xmit(skb); 869 if (ret) 870 goto vlan_del; 871 872 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 873 ret = tpriv->ok ? 0 : -ETIMEDOUT; 874 if (ret && !i) { 875 goto vlan_del; 876 } else if (!ret && i) { 877 ret = -EINVAL; 878 goto vlan_del; 879 } else { 880 ret = 0; 881 } 882 883 tpriv->ok = false; 884 } 885 886 vlan_del: 887 vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id); 888 cleanup: 889 dev_remove_pack(&tpriv->pt); 890 kfree(tpriv); 891 return ret; 892 } 893 894 static int stmmac_test_dvlanfilt(struct stmmac_priv *priv) 895 { 896 struct stmmac_packet_attrs attr = { }; 897 struct stmmac_test_priv *tpriv; 898 struct sk_buff *skb = NULL; 899 int ret = 0, i; 900 901 if (!priv->dma_cap.vlhash) 902 return -EOPNOTSUPP; 903 904 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 905 if (!tpriv) 906 return -ENOMEM; 907 908 tpriv->ok = false; 909 tpriv->double_vlan = true; 910 init_completion(&tpriv->comp); 911 912 tpriv->pt.type = htons(ETH_P_8021Q); 913 tpriv->pt.func = stmmac_test_vlan_validate; 914 tpriv->pt.dev = priv->dev; 915 tpriv->pt.af_packet_priv = tpriv; 916 tpriv->packet = &attr; 917 918 /* 919 * As we use HASH filtering, false positives may appear. This is a 920 * specially chosen ID so that adjacent IDs (+4) have different 921 * HASH values. 
922 */ 923 tpriv->vlan_id = 0x123; 924 dev_add_pack(&tpriv->pt); 925 926 ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id); 927 if (ret) 928 goto cleanup; 929 930 for (i = 0; i < 4; i++) { 931 attr.vlan = 2; 932 attr.vlan_id_out = tpriv->vlan_id + i; 933 attr.dst = priv->dev->dev_addr; 934 attr.sport = 9; 935 attr.dport = 9; 936 937 skb = stmmac_test_get_udp_skb(priv, &attr); 938 if (!skb) { 939 ret = -ENOMEM; 940 goto vlan_del; 941 } 942 943 skb_set_queue_mapping(skb, 0); 944 ret = dev_queue_xmit(skb); 945 if (ret) 946 goto vlan_del; 947 948 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 949 ret = tpriv->ok ? 0 : -ETIMEDOUT; 950 if (ret && !i) { 951 goto vlan_del; 952 } else if (!ret && i) { 953 ret = -EINVAL; 954 goto vlan_del; 955 } else { 956 ret = 0; 957 } 958 959 tpriv->ok = false; 960 } 961 962 vlan_del: 963 vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id); 964 cleanup: 965 dev_remove_pack(&tpriv->pt); 966 kfree(tpriv); 967 return ret; 968 } 969 970 #ifdef CONFIG_NET_CLS_ACT 971 static int stmmac_test_rxp(struct stmmac_priv *priv) 972 { 973 unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00}; 974 struct tc_cls_u32_offload cls_u32 = { }; 975 struct stmmac_packet_attrs attr = { }; 976 struct tc_action **actions, *act; 977 struct tc_u32_sel *sel; 978 struct tcf_exts *exts; 979 int ret, i, nk = 1; 980 981 if (!tc_can_offload(priv->dev)) 982 return -EOPNOTSUPP; 983 if (!priv->dma_cap.frpsel) 984 return -EOPNOTSUPP; 985 986 sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL); 987 if (!sel) 988 return -ENOMEM; 989 990 exts = kzalloc(sizeof(*exts), GFP_KERNEL); 991 if (!exts) { 992 ret = -ENOMEM; 993 goto cleanup_sel; 994 } 995 996 actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL); 997 if (!actions) { 998 ret = -ENOMEM; 999 goto cleanup_exts; 1000 } 1001 1002 act = kzalloc(nk * sizeof(*act), GFP_KERNEL); 1003 if (!act) { 1004 ret = -ENOMEM; 1005 goto cleanup_actions; 1006 } 1007 1008 
cls_u32.command = TC_CLSU32_NEW_KNODE; 1009 cls_u32.common.chain_index = 0; 1010 cls_u32.common.protocol = htons(ETH_P_ALL); 1011 cls_u32.knode.exts = exts; 1012 cls_u32.knode.sel = sel; 1013 cls_u32.knode.handle = 0x123; 1014 1015 exts->nr_actions = nk; 1016 exts->actions = actions; 1017 for (i = 0; i < nk; i++) { 1018 struct tcf_gact *gact = to_gact(&act[i]); 1019 1020 actions[i] = &act[i]; 1021 gact->tcf_action = TC_ACT_SHOT; 1022 } 1023 1024 sel->nkeys = nk; 1025 sel->offshift = 0; 1026 sel->keys[0].off = 6; 1027 sel->keys[0].val = htonl(0xdeadbeef); 1028 sel->keys[0].mask = ~0x0; 1029 1030 ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); 1031 if (ret) 1032 goto cleanup_act; 1033 1034 attr.dst = priv->dev->dev_addr; 1035 attr.src = addr; 1036 1037 ret = __stmmac_test_loopback(priv, &attr); 1038 ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */ 1039 1040 cls_u32.command = TC_CLSU32_DELETE_KNODE; 1041 stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); 1042 1043 cleanup_act: 1044 kfree(act); 1045 cleanup_actions: 1046 kfree(actions); 1047 cleanup_exts: 1048 kfree(exts); 1049 cleanup_sel: 1050 kfree(sel); 1051 return ret; 1052 } 1053 #else 1054 static int stmmac_test_rxp(struct stmmac_priv *priv) 1055 { 1056 return -EOPNOTSUPP; 1057 } 1058 #endif 1059 1060 static int stmmac_test_desc_sai(struct stmmac_priv *priv) 1061 { 1062 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 1063 struct stmmac_packet_attrs attr = { }; 1064 int ret; 1065 1066 if (!priv->dma_cap.vlins) 1067 return -EOPNOTSUPP; 1068 1069 attr.remove_sa = true; 1070 attr.sarc = true; 1071 attr.src = src; 1072 attr.dst = priv->dev->dev_addr; 1073 1074 priv->sarc_type = 0x1; 1075 1076 ret = __stmmac_test_loopback(priv, &attr); 1077 1078 priv->sarc_type = 0x0; 1079 return ret; 1080 } 1081 1082 static int stmmac_test_desc_sar(struct stmmac_priv *priv) 1083 { 1084 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 1085 struct stmmac_packet_attrs attr = { }; 1086 int 
ret; 1087 1088 if (!priv->dma_cap.vlins) 1089 return -EOPNOTSUPP; 1090 1091 attr.sarc = true; 1092 attr.src = src; 1093 attr.dst = priv->dev->dev_addr; 1094 1095 priv->sarc_type = 0x2; 1096 1097 ret = __stmmac_test_loopback(priv, &attr); 1098 1099 priv->sarc_type = 0x0; 1100 return ret; 1101 } 1102 1103 static int stmmac_test_reg_sai(struct stmmac_priv *priv) 1104 { 1105 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 1106 struct stmmac_packet_attrs attr = { }; 1107 int ret; 1108 1109 if (!priv->dma_cap.vlins) 1110 return -EOPNOTSUPP; 1111 1112 attr.remove_sa = true; 1113 attr.sarc = true; 1114 attr.src = src; 1115 attr.dst = priv->dev->dev_addr; 1116 1117 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2)) 1118 return -EOPNOTSUPP; 1119 1120 ret = __stmmac_test_loopback(priv, &attr); 1121 1122 stmmac_sarc_configure(priv, priv->ioaddr, 0x0); 1123 return ret; 1124 } 1125 1126 static int stmmac_test_reg_sar(struct stmmac_priv *priv) 1127 { 1128 unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 1129 struct stmmac_packet_attrs attr = { }; 1130 int ret; 1131 1132 if (!priv->dma_cap.vlins) 1133 return -EOPNOTSUPP; 1134 1135 attr.sarc = true; 1136 attr.src = src; 1137 attr.dst = priv->dev->dev_addr; 1138 1139 if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3)) 1140 return -EOPNOTSUPP; 1141 1142 ret = __stmmac_test_loopback(priv, &attr); 1143 1144 stmmac_sarc_configure(priv, priv->ioaddr, 0x0); 1145 return ret; 1146 } 1147 1148 static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan) 1149 { 1150 struct stmmac_packet_attrs attr = { }; 1151 struct stmmac_test_priv *tpriv; 1152 struct sk_buff *skb = NULL; 1153 int ret = 0; 1154 u16 proto; 1155 1156 if (!priv->dma_cap.vlins) 1157 return -EOPNOTSUPP; 1158 1159 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 1160 if (!tpriv) 1161 return -ENOMEM; 1162 1163 proto = svlan ? 
/* NOTE(review): this line continues the "svlan ? ... : ..." protocol
 * selection begun on the previous line; the head of
 * stmmac_test_vlanoff_common() lies outside this chunk.
 */
ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	/* Capture looped-back frames so the validate callback can check the
	 * VLAN tag that the MAC was asked to insert on transmit.
	 */
	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	/* Make the VID known to the device so RX filtering accepts it back */
	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	/* Tag goes into skb metadata only: actual insertion into the frame
	 * is expected to be done by the hardware on transmit.
	 */
	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto vlan_del;

	/* ok is set by stmmac_test_vlan_validate() on a matching frame */
	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* HW VLAN TX insertion with a single (customer, 802.1Q) tag */
static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

/* HW VLAN TX insertion with a service (802.1AD) tag; needs double-VLAN cap */
static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * __stmmac_test_l3filt - verify an L3 (IPv4 address) drop filter.
 * @priv: driver private data
 * @dst: IPv4 destination address to match
 * @src: IPv4 source address to match
 * @dst_mask: mask applied to @dst (~0 = exact match, 0 = don't care)
 * @src_mask: mask applied to @src
 *
 * Builds a flow_cls_offload DROP rule for the given addresses and checks
 * that a matching looped-back packet IS received before the rule is
 * installed and is NOT received afterwards.
 *
 * Return: 0 on success, -EOPNOTSUPP when tc offload or the L3/L4 filter
 * engine is unavailable, negative errno otherwise.
 */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	/* RSS would hash-steer the looped-back packet and could mask the
	 * filter behavior, so disable it for the duration of the test
	 * (restored at cleanup_rss below).
	 */
	if (priv->rss.enable) {
		struct stmmac_rss rss = { .enable = false, };

		stmmac_rss_configure(priv, priv->hw, &rss,
				     priv->plat->rx_queues_to_use);
	}

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	/* Minimal dissector: only IPv4 addresses, stored at offset 0 of the
	 * key/mask blobs below.
	 */
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	key.src = htonl(src);
	key.dst = htonl(dst);
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	/* Loopback packet carries the same addresses the filter matches on */
	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet: a loopback timeout now means the DROP
	 * rule worked, so invert the result.
	 */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	/* Restore the RSS configuration disabled above */
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif

/* Drop filter on an exact L3 destination address */
static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
}

/* Drop filter on an exact L3 source address */
static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * __stmmac_test_l4filt - verify an L4 (TCP/UDP port) drop filter.
 * @priv: driver private data
 * @dst: destination port to match
 * @src: source port to match
 * @dst_mask: mask applied to @dst (~0 = exact match, 0 = don't care)
 * @src_mask: mask applied to @src
 * @udp: true to test UDP, false for TCP
 *
 * Same receive-then-drop scheme as __stmmac_test_l3filt(), but matching on
 * FLOW_DISSECTOR_KEY_BASIC (ip_proto) plus FLOW_DISSECTOR_KEY_PORTS laid
 * out in the stack-local keys/masks structs below.
 */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	/* Disable RSS while the filter is under test; restored below */
	if (priv->rss.enable) {
		struct stmmac_rss rss = { .enable = false, };

		stmmac_rss_configure(priv, priv->hw, &rss,
				     priv->plat->rx_queues_to_use);
	}

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto
/* NOTE(review): continuation of __stmmac_test_l4filt(); the "goto" on the
 * previous line targets this label reference.
 */
cleanup_rss;
	}

	/* Two-key dissector: ip_proto (BASIC) at offset 0, then the port
	 * pair, matching the layout of the local keys/masks structs.
	 */
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&keys;
	rule->match.mask = (void *)&masks;

	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
	keys.key.src = htons(src);
	keys.key.dst = htons(dst);
	masks.mask.src = src_mask;
	masks.mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	/* Loopback packet carries the same ports the filter matches on */
	attr.dst = priv->dev->dev_addr;
	attr.tcp = !udp;
	attr.sport = src;
	attr.dport = dst;
	attr.ip_dst = 0;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet: a timeout here means the DROP rule
	 * worked, so invert the result.
	 */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	/* Restore the RSS configuration disabled above */
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	return -EOPNOTSUPP;
}
#endif

/* Drop filter on an exact TCP destination port */
static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
}

/* Drop filter on an exact TCP source port */
static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
}

/* Drop filter on an exact UDP destination port */
static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
}

/* Drop filter on an exact UDP source port */
static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
{
	u16 dummy_port = 0x123;

	return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
}

/* packet_type handler for the ARP offload test: completes the waiter when
 * an ARP reply addressed to the test source MAC arrives. Always consumes
 * the skb and returns 0.
 */
static int stmmac_test_arp_validate(struct sk_buff *skb,
				    struct net_device *ndev,
				    struct packet_type *pt,
				    struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;

	/* Reply must be addressed to the MAC we sent the request from */
	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
		goto out;

	ahdr = arp_hdr(skb);
	if (ahdr->ar_op != htons(ARPOP_REPLY))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

static int stmmac_test_arpoffload(struct
/* NOTE(review): completes the stmmac_test_arpoffload() signature started on
 * the previous line. The test sends a broadcast ARP request for an address
 * programmed into the ARP offload engine and expects the hardware to answer
 * it (validated by stmmac_test_arp_validate()).
 */
stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;
	u32 ip_src = 0xdeadbeef;
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	/* Listen for the ARP reply the hardware is expected to generate */
	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Program ip_addr as the address the MAC answers ARP requests for */
	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret)
		goto cleanup;

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	/* Always disarm the offload engine, even on early failure */
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loop back one maximally-sized frame on @queue */
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_packet_attrs attr = { };
	int size = priv->dma_buf_sz;

	/* Only XGMAC has SW support for multiple RX descs in same packet */
	if (priv->plat->has_xgmac)
		size = priv->dev->max_mtu;

	attr.dst = priv->dev->dev_addr;
	attr.max_size = size - ETH_FCS_LEN;
	attr.queue_mapping = queue;

	return __stmmac_test_loopback(priv, &attr);
}

/* Jumbo loopback on the default queue */
static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}

/* Jumbo loopback on every TX queue; needs a multi-queue configuration */
static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}

/* Verify split-header RX: the rx_split_hdr_pkt_n counter must advance for
 * both a UDP and a TCP looped-back packet.
 */
static int stmmac_test_sph(struct stmmac_priv *priv)
{
	unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->sph)
		return -EOPNOTSUPP;

	/* Check for UDP first */
	attr.dst = priv->dev->dev_addr;
	attr.tcp = false;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	/* Counter must have moved, otherwise no header split happened */
	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if (cnt_end <= cnt_start)
		return -EINVAL;

	/* Check for TCP now */
	cnt_start = cnt_end;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = true;

	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		return ret;

	cnt_end = priv->xstats.rx_split_hdr_pkt_n;
	if
/* NOTE(review): closes the trailing TCP counter check of stmmac_test_sph() */
(cnt_end <= cnt_start)
		return -EINVAL;

	return 0;
}

/* Loopback mode a selftest requires before it can run */
#define STMMAC_LOOPBACK_NONE	0
#define STMMAC_LOOPBACK_MAC	1
#define STMMAC_LOOPBACK_PHY	2

/* Table of selftests exposed through ethtool; entry order defines the
 * result indexes reported to userspace.
 */
static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];		/* name shown by ethtool */
	int lb;					/* required loopback mode */
	int (*fn)(struct stmmac_priv *priv);	/* test body */
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "Double VLAN Filtering",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	},
};

/*
 * stmmac_selftest_run - ethtool self-test entry point.
 * @dev: netdev under test
 * @etest: ethtool request; gets ETH_TEST_FL_FAILED set on any failure
 * @buf: per-test result array (0 = pass, negative errno = fail/unsupported)
 *
 * Runs every entry of stmmac_selftests[], setting up and tearing down the
 * MAC or PHY loopback each test requires. Only offline testing with link
 * up is supported; carrier is dropped for the duration of the run.
 */
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |=
/* NOTE(review): continues the "etest->flags |=" statement started on the
 * previous line.
 */
ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for queues drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		/* Enable the loopback mode this test requires; PHY loopback
		 * falls back to MAC loopback when no PHY is attached or the
		 * PHY refuses.
		 */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loopback. If any of
		 * them is not supported we abort earlier.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		/* -EOPNOTSUPP is reported in buf[] but does not fail the run */
		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		/* Tear down whatever loopback was enabled above */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

/* Fill @data with ETH_GSTRING_LEN-sized, 1-based numbered test names */
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

/* Number of selftests reported to ethtool */
int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}