// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
 * stmmac Selftests Support
 *
 * Author: Jose Abreu <joabreu@synopsys.com>
 */

#include <linux/completion.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"

/*
 * Private trailer placed right after the L4 header in every generated test
 * packet, so the RX validation callbacks can recognize our own frames.
 */
struct stmmachdr {
	__be32 version;
	__be64 magic;	/* STMMAC_TEST_PKT_MAGIC, big-endian on the wire */
	u8 id;		/* per-packet sequence ID (stmmac_test_next_id) */
} __packed;

/* Minimum test frame: Ethernet + IPv4 + private trailer (no L4 header). */
#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
			      sizeof(struct stmmachdr))
#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
/* How long the senders wait for the looped-back frame to come home. */
#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)

/*
 * Per-test packet description. Callers zero-initialize this and set only
 * the fields relevant to the feature under test.
 */
struct stmmac_packet_attrs {
	int vlan;		/* number of VLAN tags: 0, 1, or 2 (QinQ) */
	int vlan_id_in;		/* inner VLAN ID (used when vlan == 2) */
	int vlan_id_out;	/* outer VLAN ID */
	unsigned char *src;	/* source MAC; NULL leaves it zeroed */
	unsigned char *dst;	/* destination MAC; NULL leaves it zeroed */
	u32 ip_src;		/* IPv4 source address (host order) */
	u32 ip_dst;		/* IPv4 destination address (host order) */
	int tcp;		/* non-zero: TCP; zero: UDP */
	int sport;		/* L4 source port */
	int dport;		/* L4 destination port */
	u32 exp_hash;		/* expect a non-zero RSS hash on RX */
	int dont_wait;		/* fire-and-forget: skip RX validation */
	int timeout;		/* RX wait, in jiffies; 0 selects the default */
	int size;		/* extra payload bytes after the trailer */
	int max_size;		/* if set, pad the frame up to this size */
	int remove_sa;		/* build the frame without a source MAC field */
	u8 id;			/* filled by the generator; echoed in trailer */
	int sarc;		/* Source Address Replacement test: on RX,
				 * source MAC must equal destination MAC */
	u16 queue_mapping;	/* TX queue to transmit from */
};

/* Monotonic ID stamped into each generated packet's trailer. */
static u8 stmmac_test_next_id;

/*
 * Build a UDP (or TCP) IPv4 test frame according to @attr, with the
 * stmmachdr trailer appended so the receive side can validate it.
 * Returns the skb ready for dev_queue_xmit(), or NULL on allocation
 * failure.
 */
static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	struct sk_buff *skb = NULL;
	struct udphdr *uhdr = NULL;
	struct tcphdr *thdr = NULL;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct iphdr *ihdr;
	int iplen, size;

	/* Account for every optional header before allocating. */
	size = attr->size + STMMAC_TEST_PKT_SIZE;
	if (attr->vlan) {
		size += 4;		/* one 802.1Q tag */
		if (attr->vlan > 1)
			size += 4;	/* second tag for QinQ */
	}

	if (attr->tcp)
		size += sizeof(struct tcphdr);
	else
		size += sizeof(struct udphdr);

	if (attr->max_size && (attr->max_size > size))
		size = attr->max_size;

	skb = netdev_alloc_skb_ip_align(priv->dev, size);
	if (!skb)
		return NULL;

	prefetchw(skb->data);

	/* Ethernet header; remove_sa shrinks it by the 6 source MAC bytes. */
	if (attr->vlan > 1)
		ehdr = skb_push(skb, ETH_HLEN + 8);
	else if (attr->vlan)
		ehdr = skb_push(skb, ETH_HLEN + 4);
	else if (attr->remove_sa)
		ehdr = skb_push(skb, ETH_HLEN - 6);
	else
		ehdr = skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);

	skb_set_network_header(skb, skb->len);
	ihdr = skb_put(skb, sizeof(*ihdr));

	skb_set_transport_header(skb, skb->len);
	if (attr->tcp)
		thdr = skb_put(skb, sizeof(*thdr));
	else
		uhdr = skb_put(skb, sizeof(*uhdr));

	if (!attr->remove_sa)
		eth_zero_addr(ehdr->h_source);
	eth_zero_addr(ehdr->h_dest);
	if (attr->src && !attr->remove_sa)
		ether_addr_copy(ehdr->h_source, attr->src);
	if (attr->dst)
		ether_addr_copy(ehdr->h_dest, attr->dst);

	if (!attr->remove_sa) {
		ehdr->h_proto = htons(ETH_P_IP);
	} else {
		__be16 *ptr = (__be16 *)ehdr;

		/* HACK: with the source MAC removed, the ethertype sits at
		 * offset 6 (word index 3) instead of its usual place. */
		ptr[3] = htons(ETH_P_IP);
	}

	if (attr->vlan) {
		__be16 *tag, *proto;

		/* Tag location shifts by 6 bytes when the SA is absent. */
		if (!attr->remove_sa) {
			tag = (void *)ehdr + ETH_HLEN;
			proto = (void *)ehdr + (2 * ETH_ALEN);
		} else {
			tag = (void *)ehdr + ETH_HLEN - 6;
			proto = (void *)ehdr + ETH_ALEN;
		}

		proto[0] = htons(ETH_P_8021Q);
		tag[0] = htons(attr->vlan_id_out);
		tag[1] = htons(ETH_P_IP);
		if (attr->vlan > 1) {
			/* QinQ: outer 802.1AD tag followed by inner 802.1Q. */
			proto[0] = htons(ETH_P_8021AD);
			tag[1] = htons(ETH_P_8021Q);
			tag[2] = htons(attr->vlan_id_in);
			tag[3] = htons(ETH_P_IP);
		}
	}

	if (attr->tcp) {
		thdr->source = htons(attr->sport);
		thdr->dest = htons(attr->dport);
		thdr->doff = sizeof(struct tcphdr) / 4;
		thdr->check = 0;
	} else {
		uhdr->source = htons(attr->sport);
		uhdr->dest = htons(attr->dport);
		uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
		if (attr->max_size)
			uhdr->len = htons(attr->max_size -
					  (sizeof(*ihdr) + sizeof(*ehdr)));
		uhdr->check = 0;
	}

	ihdr->ihl = 5;
	ihdr->ttl = 32;
	ihdr->version = 4;
	if (attr->tcp)
		ihdr->protocol = IPPROTO_TCP;
	else
		ihdr->protocol = IPPROTO_UDP;
	iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
	if (attr->tcp)
		iplen += sizeof(*thdr);
	else
		iplen += sizeof(*uhdr);

	if (attr->max_size)
		iplen = attr->max_size - sizeof(*ehdr);

	ihdr->tot_len = htons(iplen);
	ihdr->frag_off = 0;
	ihdr->saddr = htonl(attr->ip_src);
	ihdr->daddr = htonl(attr->ip_dst);
	ihdr->tos = 0;
	ihdr->id = 0;
	ip_send_check(ihdr);

	/* Private trailer: lets the RX callback match this exact packet. */
	shdr = skb_put(skb, sizeof(*shdr));
	shdr->version = 0;
	shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
	attr->id = stmmac_test_next_id;
	shdr->id = stmmac_test_next_id++;

	if (attr->size)
		skb_put(skb, attr->size);
	if (attr->max_size && (attr->max_size > skb->len))
		skb_put(skb, attr->max_size - skb->len);

	/* Offload the L4 checksum so the TX csum engine gets exercised. */
	skb->csum = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	if (attr->tcp) {
		thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
	}

	skb->protocol = htons(ETH_P_IP);
	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/*
 * Build an ARP request frame with the given attributes.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 ip_src = htonl(attr->ip_src);
	__be32 ip_dst = htonl(attr->ip_dst);
	struct sk_buff *skb = NULL;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}

/*
 * State shared between a test's sender and its packet_type RX callback.
 * The callback sets @ok and signals @comp when the expected frame arrives.
 */
struct stmmac_test_priv {
	struct stmmac_packet_attrs *packet;	/* what to expect on RX */
	struct packet_type pt;			/* registered RX hook */
	struct completion comp;			/* signaled on a match */
	int double_vlan;			/* RX frames carry QinQ tags */
	int vlan_id;				/* expected VLAN ID, if any */
	int ok;					/* set once a frame matched */
};

/*
 * packet_type handler: validate a looped-back frame against
 * tpriv->packet and complete the waiter on a full match. Always
 * consumes the skb and returns 0.
 */
static int stmmac_test_loopback_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct tcphdr *thdr;
	struct iphdr *ihdr;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (tpriv->packet->dst) {
		if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
			goto out;
	}
	if (tpriv->packet->sarc) {
		/* SARC tests expect HW to have rewritten SA to match DA. */
		if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
			goto out;
	} else if (tpriv->packet->src) {
		if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
			goto out;
	}

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);

	if (tpriv->packet->tcp) {
		if (ihdr->protocol != IPPROTO_TCP)
			goto out;

		thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (thdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
	} else {
		if (ihdr->protocol != IPPROTO_UDP)
			goto out;

		uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
		if (uhdr->dest != htons(tpriv->packet->dport))
			goto out;

		shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	}

	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;
	if (tpriv->packet->exp_hash && !skb->hash)
		goto out;
	if (tpriv->packet->id != shdr->id)
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/*
 * Core loopback helper: register an RX hook, transmit one generated
 * UDP/TCP frame described by @attr, and wait for it to come back.
 * Returns 0 when the frame was received and validated, -ETIMEDOUT
 * when it was not, or a negative errno on setup failure.
 */
static int __stmmac_test_loopback(struct stmmac_priv *priv,
				  struct stmmac_packet_attrs *attr)
{
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_loopback_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = attr;

	/* Fire-and-forget senders never look at RX, so skip the hook. */
	if (!attr->dont_wait)
		dev_add_pack(&tpriv->pt);

	skb = stmmac_test_get_udp_skb(priv, attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	skb_set_queue_mapping(skb, attr->queue_mapping);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup;

	if (attr->dont_wait)
		goto cleanup;

	if (!attr->timeout)
		attr->timeout = STMMAC_LB_TIMEOUT;

	wait_for_completion_timeout(&tpriv->comp, attr->timeout);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	if (!attr->dont_wait)
		dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* Loopback a frame addressed to our own MAC (MAC-level loopback). */
static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	attr.dst = priv->dev->dev_addr;
	return __stmmac_test_loopback(priv, &attr);
}

/*
 * Same loopback test but with the PHY put into loopback mode for the
 * duration of the run. Requires an attached phydev.
 */
static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dev->phydev)
		return -EBUSY;

	ret = phy_loopback(priv->dev->phydev, true);
	if (ret)
		return ret;

	attr.dst = priv->dev->dev_addr;
	ret = __stmmac_test_loopback(priv, &attr);

	phy_loopback(priv->dev->phydev, false);
	return ret;
}

/*
 * Validate the MMC (RMON) counters: run a MAC loopback and check that
 * the good-TX frame counter advanced.
 */
static int stmmac_test_mmc(struct stmmac_priv *priv)
{
	struct stmmac_counters initial, final;
	int ret;

	memset(&initial, 0, sizeof(initial));
	memset(&final, 0, sizeof(final));

	if (!priv->dma_cap.rmon)
		return -EOPNOTSUPP;

	/* Save previous results into internal struct */
	stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		return ret;

	/* These will be loopback results so no need to save them */
	stmmac_mmc_read(priv, priv->mmcaddr, &final);

	/*
	 * The number of MMC counters available depends on HW configuration
	 * so we just use this one to validate the feature. I hope there is
	 * not a version without this counter.
	 */
	if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
		return -EINVAL;

	return 0;
}

/*
 * Validate Energy Efficient Ethernet: after a loopback burst with no
 * further traffic, the TX path must enter (and have exited) LPI mode,
 * observed via the LPI interrupt counters in xstats.
 */
static int stmmac_test_eee(struct stmmac_priv *priv)
{
	struct stmmac_extra_stats *initial, *final;
	int retries = 10;
	int ret;

	if (!priv->dma_cap.eee || !priv->eee_active)
		return -EOPNOTSUPP;

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	final = kzalloc(sizeof(*final), GFP_KERNEL);
	if (!final) {
		ret = -ENOMEM;
		goto out_free_initial;
	}

	memcpy(initial, &priv->xstats, sizeof(*initial));

	ret = stmmac_test_mac_loopback(priv);
	if (ret)
		goto out_free_final;

	/* We have no traffic in the line so, sooner or later it will go LPI */
	while (--retries) {
		memcpy(final, &priv->xstats, sizeof(*final));

		if (final->irq_tx_path_in_lpi_mode_n >
		    initial->irq_tx_path_in_lpi_mode_n)
			break;
		msleep(100);
	}

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out_free_final;
	}

	if (final->irq_tx_path_in_lpi_mode_n <=
	    initial->irq_tx_path_in_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

	if (final->irq_tx_path_exit_lpi_mode_n <=
	    initial->irq_tx_path_exit_lpi_mode_n) {
		ret = -EINVAL;
		goto out_free_final;
	}

out_free_final:
	kfree(final);
out_free_initial:
	kfree(initial);
	return ret;
}

/*
 * Filtering tests are meaningless in promiscuous mode (everything is
 * accepted); bail out with -EOPNOTSUPP in that case.
 */
static int stmmac_filter_check(struct stmmac_priv *priv)
{
	if (!(priv->dev->flags & IFF_PROMISC))
		return 0;

	netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
	return -EOPNOTSUPP;
}

/*
 * Hash filter test: a multicast address added to the filter must be
 * received; a different multicast address (not added) must be dropped.
 * NOTE(review): bd_addr is presumably chosen to land in a different
 * hash bin than gd_addr — confirm against the MAC's hash function.
 */
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
	unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
	struct stmmac_packet_attrs attr = { };
	int ret;

	ret = stmmac_filter_check(priv);
	if (ret)
		return ret;

	if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	ret = dev_mc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, gd_addr);
	return ret;
}

/*
 * Perfect (exact-match) filter test: a unicast address added to the
 * filter must be received; another unicast address must be dropped.
 */
static int stmmac_test_pfilt(struct stmmac_priv *priv)
{
	unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;

	ret = dev_uc_add(priv->dev, gd_addr);
	if (ret)
		return ret;

	attr.dst = gd_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = bd_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, gd_addr);
	return ret;
}

/* No-op sync callback used when re-syncing address lists after a test. */
static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
{
	return 0;
}

/* As we are in test mode of ethtool we already own the rtnl lock
 * so no address will change from user. We can just call the
 * ndo_set_rx_mode() callback directly */
static void stmmac_test_set_rx_mode(struct net_device *netdev)
{
	if (netdev->netdev_ops->ndo_set_rx_mode)
		netdev->netdev_ops->ndo_set_rx_mode(netdev);
}

/*
 * With all multicast entries removed: a unicast address in the filter
 * must pass, while a multicast address must be dropped.
 */
static int stmmac_test_mcfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (!priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	/* Remove all MC addresses */
	__dev_mc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_uc_add(priv->dev, uc_addr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_uc_del(priv->dev, uc_addr);
	/* Restore the MC list we flushed above. */
	__dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

/*
 * Mirror of mcfilt: with all unicast entries removed, a multicast
 * address in the filter must pass while a unicast one is dropped.
 */
static int stmmac_test_ucfilt(struct stmmac_priv *priv)
{
	unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
	unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (stmmac_filter_check(priv))
		return -EOPNOTSUPP;
	if (!priv->hw->multicast_filter_bins)
		return -EOPNOTSUPP;

	/* Remove all UC addresses */
	__dev_uc_unsync(priv->dev, NULL);
	stmmac_test_set_rx_mode(priv->dev);

	ret = dev_mc_add(priv->dev, mc_addr);
	if (ret)
		goto cleanup;

	attr.dst = mc_addr;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup;

	attr.dst = uc_addr;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

cleanup:
	dev_mc_del(priv->dev, mc_addr);
	/* Restore the UC list we flushed above. */
	__dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
	stmmac_test_set_rx_mode(priv->dev);
	return ret;
}

/*
 * RX hook for the flow-control test: succeed when we see a PAUSE frame
 * (ETH_P_PAUSE) sourced from our own MAC. Consumes the skb.
 */
static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
					 struct net_device *ndev,
					 struct packet_type *pt,
					 struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
		goto out;
	if (ehdr->h_proto != htons(ETH_P_PAUSE))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}

/*
 * Flow-control test: stop all RX queues, flood ourselves until the RX
 * FIFO fills, and check the MAC emits a PAUSE frame. RX queues and DMA
 * tail pointers are restored afterwards.
 */
static int stmmac_test_flowctrl(struct stmmac_priv *priv)
{
	unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
	struct phy_device *phydev = priv->dev->phydev;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_test_priv *tpriv;
	unsigned int pkt_count;
	int i, ret = 0;

	if (!phydev || (!phydev->pause && !phydev->asym_pause))
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);
	tpriv->pt.type = htons(ETH_P_PAUSE);
	tpriv->pt.func = stmmac_test_flowctrl_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	dev_add_pack(&tpriv->pt);

	/* Compute minimum number of packets to make FIFO full */
	pkt_count = priv->plat->rx_fifo_size;
	if (!pkt_count)
		pkt_count = priv->dma_cap.rx_fifo_size;
	pkt_count /= 1400;
	pkt_count *= 2;

	for (i = 0; i < rx_cnt; i++)
		stmmac_stop_rx(priv, priv->ioaddr, i);

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret)
		goto cleanup;

	/* paddr is the 802.3x PAUSE multicast destination address. */
	ret = dev_mc_add(priv->dev, paddr);
	if (ret)
		goto cleanup;

	for (i = 0; i < pkt_count; i++) {
		struct stmmac_packet_attrs attr = { };

		attr.dst = priv->dev->dev_addr;
		attr.dont_wait = true;
		attr.size = 1400;

		ret = __stmmac_test_loopback(priv, &attr);
		if (ret)
			goto cleanup;
		if (tpriv->ok)
			break;
	}

	/* Wait for some time in case RX Watchdog is enabled */
	msleep(200);

	for (i = 0; i < rx_cnt; i++) {
		struct stmmac_channel *ch = &priv->channel[i];
		u32 tail;

		tail = priv->rx_queue[i].dma_rx_phy +
			(DMA_RX_SIZE * sizeof(struct dma_desc));

		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
		stmmac_start_rx(priv, priv->ioaddr, i);

		local_bh_disable();
		napi_reschedule(&ch->rx_napi);
		local_bh_enable();
	}

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup:
	dev_mc_del(priv->dev, paddr);
	dev_set_promiscuity(priv->dev, -1);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/*
 * RSS test: loop back a frame and require that RX set a non-zero
 * skb->hash (checked via attr.exp_hash in the validation callback).
 */
static int stmmac_test_rss(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };

	if (!priv->dma_cap.rssen || !priv->rss.enable)
		return -EOPNOTSUPP;

	attr.dst = priv->dev->dev_addr;
	attr.exp_hash = true;
	attr.sport = 0x321;
	attr.dport = 0x123;

	return __stmmac_test_loopback(priv, &attr);
}

/*
 * RX hook for the VLAN tests: validate tag protocol/ID (when vlan_id is
 * set), destination MAC, UDP port and trailer magic. Consumes the skb.
 */
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;
	u16 proto;

	proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;
	if (tpriv->vlan_id) {
		/* Tag must have been offloaded into skb metadata by HW. */
		if (skb->vlan_proto != htons(proto))
			goto out;
		if (skb->vlan_tci != tpriv->vlan_id)
			goto out;
	}

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}

/*
 * VLAN hash filter test: register one VID, then send with VID, VID+1,
 * VID+2, VID+3. Only the first (i == 0) may be received; any later hit
 * is a filtering failure (-EINVAL).
 */
static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* The registered VID must get through. */
			goto vlan_del;
		} else if (!ret && i) {
			/* An unregistered VID got through: filter broken. */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/*
 * Same as stmmac_test_vlanfilt but for double-tagged (802.1AD / QinQ)
 * frames, matching on the outer S-VLAN tag.
 */
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;
	init_completion(&tpriv->comp);

	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = tpriv->ok ? 0 : -ETIMEDOUT;
		if (ret && !i) {
			/* The registered VID must get through. */
			goto vlan_del;
		} else if (!ret && i) {
			/* An unregistered VID got through: filter broken. */
			ret = -EINVAL;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * Flexible RX Parser test: install a cls_u32 DROP rule matching the
 * first 4 bytes of the source MAC (offset 6, value 0xdeadbeef) and
 * verify a frame sent with that source MAC is NOT received.
 */
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
	struct tc_cls_u32_offload cls_u32 = { };
	struct stmmac_packet_attrs attr = { };
	struct tc_action **actions, *act;
	struct tc_u32_sel *sel;
	struct tcf_exts *exts;
	int ret, i, nk = 1;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.frpsel)
		return -EOPNOTSUPP;

	sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
	if (!sel)
		return -ENOMEM;

	exts = kzalloc(sizeof(*exts), GFP_KERNEL);
	if (!exts) {
		ret = -ENOMEM;
		goto cleanup_sel;
	}

	actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
	if (!actions) {
		ret = -ENOMEM;
		goto cleanup_exts;
	}

	act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
	if (!act) {
		ret = -ENOMEM;
		goto cleanup_actions;
	}

	cls_u32.command = TC_CLSU32_NEW_KNODE;
	cls_u32.common.chain_index = 0;
	cls_u32.common.protocol = htons(ETH_P_ALL);
	cls_u32.knode.exts = exts;
	cls_u32.knode.sel = sel;
	cls_u32.knode.handle = 0x123;

	exts->nr_actions = nk;
	exts->actions = actions;
	for (i = 0; i < nk; i++) {
		struct tcf_gact *gact = to_gact(&act[i]);

		actions[i] = &act[i];
		gact->tcf_action = TC_ACT_SHOT;
	}

	sel->nkeys = nk;
	sel->offshift = 0;
	sel->keys[0].off = 6;	/* offset of h_source in the Ethernet header */
	sel->keys[0].val = htonl(0xdeadbeef);
	sel->keys[0].mask = ~0x0;

	ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
	if (ret)
		goto cleanup_act;

	attr.dst = priv->dev->dev_addr;
	attr.src = addr;

	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */

	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
	kfree(act);
cleanup_actions:
	kfree(actions);
cleanup_exts:
	kfree(exts);
cleanup_sel:
	kfree(sel);
	return ret;
}
#else
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Descriptor-based Source Address Insertion: HW inserts the SA into
 * frames built without one (remove_sa); RX check requires SA == DA.
 */
static int stmmac_test_desc_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x1;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/*
 * Descriptor-based Source Address Replacement: HW replaces the zeroed
 * SA; RX check requires SA == DA.
 */
static int stmmac_test_desc_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	priv->sarc_type = 0x2;

	ret = __stmmac_test_loopback(priv, &attr);

	priv->sarc_type = 0x0;
	return ret;
}

/* Register-based Source Address Insertion (SARC mode 0x2). */
static int stmmac_test_reg_sai(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.remove_sa = true;
	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/* Register-based Source Address Replacement (SARC mode 0x3). */
static int stmmac_test_reg_sar(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct stmmac_packet_attrs attr = { };
	int ret;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	attr.sarc = true;
	attr.src = src;
	attr.dst = priv->dev->dev_addr;

	if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
		return -EOPNOTSUPP;

	ret = __stmmac_test_loopback(priv, &attr);

	stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
	return ret;
}

/*
 * VLAN TX offload test: hand the stack an untagged frame with
 * hwaccel tag metadata and check the tagged frame loops back and
 * passes VLAN validation. @svlan selects 802.1AD over 802.1Q.
 */
static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0;
	u16 proto;

	if (!priv->dma_cap.vlins)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;

	tpriv->ok = false;
	tpriv->double_vlan = svlan;
	init_completion(&tpriv->comp);

	tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	attr.dst = priv->dev->dev_addr;

	skb = stmmac_test_get_udp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto vlan_del;
	}

	/* Tag lives only in skb metadata; HW must insert it on TX. */
	__vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
	skb->protocol = htons(proto);

	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto vlan_del;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

vlan_del:
	vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}

/* C-VLAN (802.1Q) TX insertion offload. */
static int stmmac_test_vlanoff(struct stmmac_priv *priv)
{
	return stmmac_test_vlanoff_common(priv, false);
}

/* S-VLAN (802.1AD) TX insertion offload; needs double-VLAN capability. */
static int stmmac_test_svlanoff(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.dvlan)
		return -EOPNOTSUPP;
	return stmmac_test_vlanoff_common(priv, true);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * L3 filter test: confirm a frame passes without the filter, install a
 * flow_cls DROP rule on the given IPv4 src/dst, and confirm the same
 * frame is then dropped. RSS is temporarily disabled so the filter
 * queue assignment is deterministic.
 */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	key.src = htonl(src);
	key.dst = htonl(dst);
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);
cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif

/* L3 filter on destination address only. */
static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
}

/* L3 filter on source address only. */
static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	u32 addr = 0x10203040;

	return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
}

#ifdef CONFIG_NET_CLS_ACT
/*
 * L4 filter test: like __stmmac_test_l3filt but matching on TCP/UDP
 * ports via FLOW_DISSECTOR_KEY_PORTS (plus KEY_BASIC for ip_proto).
 */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;
	if (priv->rss.enable)
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 <<
FLOW_DISSECTOR_KEY_BASIC); 1375 dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); 1376 dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; 1377 dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); 1378 1379 cls = kzalloc(sizeof(*cls), GFP_KERNEL); 1380 if (!cls) { 1381 ret = -ENOMEM; 1382 goto cleanup_dissector; 1383 } 1384 1385 cls->common.chain_index = 0; 1386 cls->command = FLOW_CLS_REPLACE; 1387 cls->cookie = dummy_cookie; 1388 1389 rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); 1390 if (!rule) { 1391 ret = -ENOMEM; 1392 goto cleanup_cls; 1393 } 1394 1395 rule->match.dissector = dissector; 1396 rule->match.key = (void *)&keys; 1397 rule->match.mask = (void *)&masks; 1398 1399 keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP; 1400 keys.key.src = htons(src); 1401 keys.key.dst = htons(dst); 1402 masks.mask.src = src_mask; 1403 masks.mask.dst = dst_mask; 1404 1405 cls->rule = rule; 1406 1407 rule->action.entries[0].id = FLOW_ACTION_DROP; 1408 rule->action.num_entries = 1; 1409 1410 attr.dst = priv->dev->dev_addr; 1411 attr.tcp = !udp; 1412 attr.sport = src; 1413 attr.dport = dst; 1414 attr.ip_dst = 0; 1415 1416 /* Shall receive packet */ 1417 ret = __stmmac_test_loopback(priv, &attr); 1418 if (ret) 1419 goto cleanup_rule; 1420 1421 ret = stmmac_tc_setup_cls(priv, priv, cls); 1422 if (ret) 1423 goto cleanup_rule; 1424 1425 /* Shall NOT receive packet */ 1426 ret = __stmmac_test_loopback(priv, &attr); 1427 ret = ret ? 
0 : -EINVAL; 1428 1429 cls->command = FLOW_CLS_DESTROY; 1430 stmmac_tc_setup_cls(priv, priv, cls); 1431 cleanup_rule: 1432 kfree(rule); 1433 cleanup_cls: 1434 kfree(cls); 1435 cleanup_dissector: 1436 kfree(dissector); 1437 cleanup_rss: 1438 if (priv->rss.enable) { 1439 stmmac_rss_configure(priv, priv->hw, &priv->rss, 1440 priv->plat->rx_queues_to_use); 1441 } 1442 1443 return ret; 1444 } 1445 #else 1446 static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, 1447 u32 dst_mask, u32 src_mask, bool udp) 1448 { 1449 return -EOPNOTSUPP; 1450 } 1451 #endif 1452 1453 static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv) 1454 { 1455 u16 dummy_port = 0x123; 1456 1457 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false); 1458 } 1459 1460 static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv) 1461 { 1462 u16 dummy_port = 0x123; 1463 1464 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false); 1465 } 1466 1467 static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv) 1468 { 1469 u16 dummy_port = 0x123; 1470 1471 return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true); 1472 } 1473 1474 static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv) 1475 { 1476 u16 dummy_port = 0x123; 1477 1478 return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true); 1479 } 1480 1481 static int stmmac_test_arp_validate(struct sk_buff *skb, 1482 struct net_device *ndev, 1483 struct packet_type *pt, 1484 struct net_device *orig_ndev) 1485 { 1486 struct stmmac_test_priv *tpriv = pt->af_packet_priv; 1487 struct ethhdr *ehdr; 1488 struct arphdr *ahdr; 1489 1490 ehdr = (struct ethhdr *)skb_mac_header(skb); 1491 if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) 1492 goto out; 1493 1494 ahdr = arp_hdr(skb); 1495 if (ahdr->ar_op != htons(ARPOP_REPLY)) 1496 goto out; 1497 1498 tpriv->ok = true; 1499 complete(&tpriv->comp); 1500 out: 1501 kfree_skb(skb); 1502 return 0; 1503 } 1504 1505 static int stmmac_test_arpoffload(struct 
stmmac_priv *priv) 1506 { 1507 unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06}; 1508 unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1509 struct stmmac_packet_attrs attr = { }; 1510 struct stmmac_test_priv *tpriv; 1511 struct sk_buff *skb = NULL; 1512 u32 ip_addr = 0xdeadcafe; 1513 u32 ip_src = 0xdeadbeef; 1514 int ret; 1515 1516 if (!priv->dma_cap.arpoffsel) 1517 return -EOPNOTSUPP; 1518 1519 tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL); 1520 if (!tpriv) 1521 return -ENOMEM; 1522 1523 tpriv->ok = false; 1524 init_completion(&tpriv->comp); 1525 1526 tpriv->pt.type = htons(ETH_P_ARP); 1527 tpriv->pt.func = stmmac_test_arp_validate; 1528 tpriv->pt.dev = priv->dev; 1529 tpriv->pt.af_packet_priv = tpriv; 1530 tpriv->packet = &attr; 1531 dev_add_pack(&tpriv->pt); 1532 1533 attr.src = src; 1534 attr.ip_src = ip_src; 1535 attr.dst = dst; 1536 attr.ip_dst = ip_addr; 1537 1538 skb = stmmac_test_get_arp_skb(priv, &attr); 1539 if (!skb) { 1540 ret = -ENOMEM; 1541 goto cleanup; 1542 } 1543 1544 ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr); 1545 if (ret) 1546 goto cleanup; 1547 1548 ret = dev_set_promiscuity(priv->dev, 1); 1549 if (ret) 1550 goto cleanup; 1551 1552 skb_set_queue_mapping(skb, 0); 1553 ret = dev_queue_xmit(skb); 1554 if (ret) 1555 goto cleanup_promisc; 1556 1557 wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT); 1558 ret = tpriv->ok ? 
0 : -ETIMEDOUT; 1559 1560 cleanup_promisc: 1561 dev_set_promiscuity(priv->dev, -1); 1562 cleanup: 1563 stmmac_set_arp_offload(priv, priv->hw, false, 0x0); 1564 dev_remove_pack(&tpriv->pt); 1565 kfree(tpriv); 1566 return ret; 1567 } 1568 1569 static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) 1570 { 1571 struct stmmac_packet_attrs attr = { }; 1572 int size = priv->dma_buf_sz; 1573 1574 attr.dst = priv->dev->dev_addr; 1575 attr.max_size = size - ETH_FCS_LEN; 1576 attr.queue_mapping = queue; 1577 1578 return __stmmac_test_loopback(priv, &attr); 1579 } 1580 1581 static int stmmac_test_jumbo(struct stmmac_priv *priv) 1582 { 1583 return __stmmac_test_jumbo(priv, 0); 1584 } 1585 1586 static int stmmac_test_mjumbo(struct stmmac_priv *priv) 1587 { 1588 u32 chan, tx_cnt = priv->plat->tx_queues_to_use; 1589 int ret; 1590 1591 if (tx_cnt <= 1) 1592 return -EOPNOTSUPP; 1593 1594 for (chan = 0; chan < tx_cnt; chan++) { 1595 ret = __stmmac_test_jumbo(priv, chan); 1596 if (ret) 1597 return ret; 1598 } 1599 1600 return 0; 1601 } 1602 1603 static int stmmac_test_sph(struct stmmac_priv *priv) 1604 { 1605 unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n; 1606 struct stmmac_packet_attrs attr = { }; 1607 int ret; 1608 1609 if (!priv->sph) 1610 return -EOPNOTSUPP; 1611 1612 /* Check for UDP first */ 1613 attr.dst = priv->dev->dev_addr; 1614 attr.tcp = false; 1615 1616 ret = __stmmac_test_loopback(priv, &attr); 1617 if (ret) 1618 return ret; 1619 1620 cnt_end = priv->xstats.rx_split_hdr_pkt_n; 1621 if (cnt_end <= cnt_start) 1622 return -EINVAL; 1623 1624 /* Check for TCP now */ 1625 cnt_start = cnt_end; 1626 1627 attr.dst = priv->dev->dev_addr; 1628 attr.tcp = true; 1629 1630 ret = __stmmac_test_loopback(priv, &attr); 1631 if (ret) 1632 return ret; 1633 1634 cnt_end = priv->xstats.rx_split_hdr_pkt_n; 1635 if (cnt_end <= cnt_start) 1636 return -EINVAL; 1637 1638 return 0; 1639 } 1640 1641 #define STMMAC_LOOPBACK_NONE 0 1642 #define STMMAC_LOOPBACK_MAC 1 
#define STMMAC_LOOPBACK_PHY	2

/* Registry of all self-tests: ethtool-visible name, required loopback
 * mode, and the test callback. Order defines the reported test order.
 */
static const struct stmmac_test {
	char name[ETH_GSTRING_LEN];
	int lb;
	int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
	{
		.name = "MAC Loopback ",
		.lb = STMMAC_LOOPBACK_MAC,
		.fn = stmmac_test_mac_loopback,
	}, {
		.name = "PHY Loopback ",
		.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
		.fn = stmmac_test_phy_loopback,
	}, {
		.name = "MMC Counters ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mmc,
	}, {
		.name = "EEE ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_eee,
	}, {
		.name = "Hash Filter MC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_hfilt,
	}, {
		.name = "Perfect Filter UC ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_pfilt,
	}, {
		.name = "MC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mcfilt,
	}, {
		.name = "UC Filter ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_ucfilt,
	}, {
		.name = "Flow Control ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_flowctrl,
	}, {
		.name = "RSS ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rss,
	}, {
		.name = "VLAN Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanfilt,
	}, {
		.name = "Double VLAN Filtering",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_dvlanfilt,
	}, {
		.name = "Flexible RX Parser ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_rxp,
	}, {
		.name = "SA Insertion (desc) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sai,
	}, {
		.name = "SA Replacement (desc)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_desc_sar,
	}, {
		.name = "SA Insertion (reg) ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sai,
	}, {
		.name = "SA Replacement (reg)",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_reg_sar,
	}, {
		.name = "VLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_vlanoff,
	}, {
		.name = "SVLAN TX Insertion ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_svlanoff,
	}, {
		.name = "L3 DA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_da,
	}, {
		.name = "L3 SA Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l3filt_sa,
	}, {
		.name = "L4 DA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_tcp,
	}, {
		.name = "L4 SA TCP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_tcp,
	}, {
		.name = "L4 DA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_da_udp,
	}, {
		.name = "L4 SA UDP Filtering ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_l4filt_sa_udp,
	}, {
		.name = "ARP Offload ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_arpoffload,
	}, {
		.name = "Jumbo Frame ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_jumbo,
	}, {
		.name = "Multichannel Jumbo ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_mjumbo,
	}, {
		.name = "Split Header ",
		.lb = STMMAC_LOOPBACK_PHY,
		.fn = stmmac_test_sph,
	},
};

/* ethtool self-test entry point. Runs every test in stmmac_selftests[],
 * storing each test's result (0 or negative errno) in @buf and setting
 * ETH_TEST_FL_FAILED in @etest->flags on any real failure (-EOPNOTSUPP
 * is reported in buf but not counted as a failure).
 *
 * Only offline tests with an active link are supported. Each test's
 * required loopback mode is enabled before and torn down after the run;
 * when PHY loopback is unavailable it falls back to MAC loopback.
 */
void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int count = stmmac_selftest_get_count(priv);
	int carrier = netif_carrier_ok(dev);
	int i, ret;

	memset(buf, 0, sizeof(*buf) * count);
	stmmac_test_next_id = 0;

	if (etest->flags != ETH_TEST_FL_OFFLINE) {
		netdev_err(priv->dev, "Only offline tests are supported\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	} else if (!carrier) {
		netdev_err(priv->dev, "You need valid Link to execute tests\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* We don't want extra traffic */
	netif_carrier_off(dev);

	/* Wait for queues drain */
	msleep(200);

	for (i = 0; i < count; i++) {
		ret = 0;

		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, true);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
			break;
		case STMMAC_LOOPBACK_NONE:
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}

		/*
		 * First tests will always be MAC / PHY loobpack. If any of
		 * them is not supported we abort earlier.
		 */
		if (ret) {
			netdev_err(priv->dev, "Loopback is not supported\n");
			etest->flags |= ETH_TEST_FL_FAILED;
			break;
		}

		ret = stmmac_selftests[i].fn(priv);
		if (ret && (ret != -EOPNOTSUPP))
			etest->flags |= ETH_TEST_FL_FAILED;
		buf[i] = ret;

		/* Tear down whichever loopback mode was enabled above,
		 * mirroring the same PHY-then-MAC fallback.
		 */
		switch (stmmac_selftests[i].lb) {
		case STMMAC_LOOPBACK_PHY:
			ret = -EOPNOTSUPP;
			if (dev->phydev)
				ret = phy_loopback(dev->phydev, false);
			if (!ret)
				break;
			/* Fallthrough */
		case STMMAC_LOOPBACK_MAC:
			stmmac_set_mac_loopback(priv, priv->ioaddr, false);
			break;
		default:
			break;
		}
	}

	/* Restart everything */
	if (carrier)
		netif_carrier_on(dev);
}

/* Fill @data with the numbered test-name strings reported to ethtool,
 * one ETH_GSTRING_LEN slot per test.
 */
void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
{
	u8 *p = data;
	int i;

	for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
		snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
			 stmmac_selftests[i].name);
		p += ETH_GSTRING_LEN;
	}
}

/* Number of self-tests exposed via ethtool. */
int stmmac_selftest_get_count(struct stmmac_priv *priv)
{
	return ARRAY_SIZE(stmmac_selftests);
}