/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 *  - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *    slow serial console or an old IDE driver doing error recovery
 *  - The PREEMPT_RT patches mostly deal with this, but also allow a
 *    tasklet or normal task to be given higher priority than our IRQ
 *    threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct ef4_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	char msg[64];
} __packed;

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int ef4_interrupt_mode_max = EF4_INT_MODE_MAX;
static const char *const ef4_interrupt_mode_names[] = {
	[EF4_INT_MODE_MSIX]   = "MSI-X",
	[EF4_INT_MODE_MSI]    = "MSI",
	[EF4_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)

/**
 * ef4_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in ef4_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct ef4_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct ef4_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	int rc = 0;

	if (efx->phy_op->test_alive) {
		rc = efx->phy_op->test_alive(efx);
		tests->phy_alive = rc ? -1 : 1;
	}

	return rc;
}
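
/* Run the NVRAM self-test if this NIC type provides one.  A return of
 * -EPERM from the test hook means the test is not permitted, and is
 * recorded as "not run" rather than as a failure.
 */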
static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int ef4_test_interrupts(struct ef4_nic *efx,
			       struct ef4_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = ef4_nic_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = ef4_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int ef4_test_eventq_irq(struct ef4_nic *efx,
			       struct ef4_self_tests *tests)
{
	struct ef4_channel *channel;
	unsigned int read_ptr[EF4_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);

	ef4_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		ef4_nic_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		ef4_for_each_channel(channel, efx) {
			ef4_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (ef4_nic_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (ef4_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			ef4_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	ef4_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
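
		/* Summarise this channel's outcome: note whether NAPI ran,
		 * and on failure report which half (event DMA or the
		 * interrupt itself) was actually seen.
		 */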
		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

static int ef4_test_phy(struct ef4_nic *efx, struct ef4_self_tests *tests,
			unsigned flags)
{
	int rc;

	if (!efx->phy_op->run_tests)
		return 0;

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/
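
/* How a loopback test run fits together: ef4_begin_loopback() builds and
 * transmits a burst of UDP packets carrying an incrementing source address
 * and the current iteration number; the MAC/PHY loops them back and
 * ef4_loopback_rx_packet() checks every field, keeping good/bad counts in
 * atomics because it runs from the RX path.  ef4_end_loopback() then
 * reconciles TX completions (detected via the extra skb reference) with
 * the RX counts.
 */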

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void ef4_loopback_rx_packet(struct ef4_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct ef4_loopback_payload *received;
	struct ef4_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct ef4_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an ef4_selftest_state for a new iteration */
static void ef4_iterate_state(struct ef4_nic *efx)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct ef4_loopback_payload *payload = &state->payload;

	/* Initialise the layerII header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}
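
/* Transmit state->packet_count copies of the test payload on @tx_queue.
 * Each skb is allocated with an extra reference (skb_get()) so that
 * ef4_end_loopback() can detect completion: once the TX path has dropped
 * its reference, the skb is no longer shared.
 */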
static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct ef4_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = ef4_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->queue,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int ef4_poll_loopback(struct ef4_nic *efx)
{
	struct ef4_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
			    struct ef4_loopback_self_tests *lb_tests)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->queue, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->queue, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
	lb_tests->tx_done[tx_queue->queue] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}
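
/* Run the loopback test on @tx_queue three times with increasing burst
 * sizes: 1, then 16, then 256 packets, each capped at a third of the TX
 * queue size.
 */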
static int
ef4_test_loopback(struct ef4_tx_queue *tx_queue,
		  struct ef4_loopback_self_tests *lb_tests)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d testing %s loopback with %d packets\n",
			  tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		ef4_iterate_state(efx);
		begin_rc = ef4_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!ef4_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			ef4_poll_loopback(efx);
		}

		end_rc = ef4_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
 * any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int ef4_wait_for_link(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}
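
/* Run the packet loopback test in every mode set in @loopback_modes: for
 * each mode the port is reconfigured, we wait for a stable link, and each
 * TX queue of the first TX channel is then exercised in turn.
 */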
static int ef4_test_loopbacks(struct ef4_nic *efx, struct ef4_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum ef4_loopback_mode mode;
	struct ef4_loopback_state *state;
	struct ef4_channel *channel =
		ef4_get_channel(efx, efx->tx_channel_offset);
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __ef4_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = ef4_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->queue &
					       EF4_TXQ_TYPE_OFFLOAD);
			rc = ef4_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
		 unsigned flags)
{
	enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	ef4_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = ef4_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return ef4_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	ef4_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = ef4_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	netif_device_attach(efx->net_dev);

	return rc_test;
}
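
/* Asynchronous event self-test: ef4_selftest_async_start() fires a test
 * event on every channel and schedules ef4_selftest_async_work() to run
 * after IRQ_TIMEOUT, which then reports any channel whose test event
 * failed to raise an interrupt.
 */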
void ef4_selftest_async_start(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_nic_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void ef4_selftest_async_cancel(struct ef4_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

void ef4_selftest_async_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic,
					   selftest_work.work);
	struct ef4_channel *channel;
	int cpu;

	ef4_for_each_channel(channel, efx) {
		cpu = ef4_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}
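
/* Usage sketch (illustrative only, not part of this driver): ef4_selftest()
 * is the entry point an ethtool .self_test callback would call.  The callback
 * below is an assumption for documentation purposes; the real glue and any
 * result-formatting helper live in the driver's ethtool code.
 *
 *	static void example_self_test(struct net_device *net_dev,
 *				      struct ethtool_test *test, u64 *data)
 *	{
 *		struct ef4_nic *efx = netdev_priv(net_dev);
 *		struct ef4_self_tests *tests;
 *
 *		tests = kzalloc(sizeof(*tests), GFP_KERNEL);
 *		if (!tests) {
 *			test->flags |= ETH_TEST_FL_FAILED;
 *			return;
 *		}
 *		if (ef4_selftest(efx, tests, test->flags))
 *			test->flags |= ETH_TEST_FL_FAILED;
 *		(copy individual results from *tests into data[] here)
 *		kfree(tests);
 *	}
 *
 * From userspace, "ethtool -t <iface> online" runs only the non-disruptive
 * subset, while "ethtool -t <iface> offline" sets ETH_TEST_FL_OFFLINE and
 * also runs the chip, PHY and loopback tests.
 */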