1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2012 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/netdevice.h> 12 #include <linux/module.h> 13 #include <linux/delay.h> 14 #include <linux/kernel_stat.h> 15 #include <linux/pci.h> 16 #include <linux/ethtool.h> 17 #include <linux/ip.h> 18 #include <linux/in.h> 19 #include <linux/udp.h> 20 #include <linux/rtnetlink.h> 21 #include <linux/slab.h> 22 #include "net_driver.h" 23 #include "efx.h" 24 #include "nic.h" 25 #include "selftest.h" 26 #include "workarounds.h" 27 28 /* IRQ latency can be enormous because: 29 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a 30 * slow serial console or an old IDE driver doing error recovery 31 * - The PREEMPT_RT patches mostly deal with this, but also allow a 32 * tasklet or normal task to be given higher priority than our IRQ 33 * threads 34 * Try to avoid blaming the hardware for this. 35 */ 36 #define IRQ_TIMEOUT HZ 37 38 /* 39 * Loopback test packet structure 40 * 41 * The self-test should stress every RSS vector, and unfortunately 42 * Falcon only performs RSS on TCP/UDP packets. 43 */ 44 struct efx_loopback_payload { 45 struct ethhdr header; 46 struct iphdr ip; 47 struct udphdr udp; 48 __be16 iteration; 49 char msg[64]; 50 } __packed; 51 52 /* Loopback test source MAC address */ 53 static const u8 payload_source[ETH_ALEN] __aligned(2) = { 54 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 55 }; 56 57 static const char payload_msg[] = 58 "Hello world! This is an Efx loopback test in progress!"; 59 60 /* Interrupt mode names */ 61 static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; 62 static const char *const efx_interrupt_mode_names[] = { 63 [EFX_INT_MODE_MSIX] = "MSI-X", 64 [EFX_INT_MODE_MSI] = "MSI", 65 [EFX_INT_MODE_LEGACY] = "legacy", 66 }; 67 #define INT_MODE(efx) \ 68 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) 69 70 /** 71 * efx_loopback_state - persistent state during a loopback selftest 72 * @flush: Drop all packets in efx_loopback_rx_packet 73 * @packet_count: Number of packets being used in this test 74 * @skbs: An array of skbs transmitted 75 * @offload_csum: Checksums are being offloaded 76 * @rx_good: RX good packet count 77 * @rx_bad: RX bad packet count 78 * @payload: Payload used in tests 79 */ 80 struct efx_loopback_state { 81 bool flush; 82 int packet_count; 83 struct sk_buff **skbs; 84 bool offload_csum; 85 atomic_t rx_good; 86 atomic_t rx_bad; 87 struct efx_loopback_payload payload; 88 }; 89 90 /* How long to wait for all the packets to arrive (in ms) */ 91 #define LOOPBACK_TIMEOUT_MS 1000 92 93 /************************************************************************** 94 * 95 * MII, NVRAM and register tests 96 * 97 **************************************************************************/ 98 99 static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) 100 { 101 int rc = 0; 102 103 if (efx->phy_op->test_alive) { 104 rc = efx->phy_op->test_alive(efx); 105 tests->phy_alive = rc ? -1 : 1; 106 } 107 108 return rc; 109 } 110 111 static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) 112 { 113 int rc = 0; 114 115 if (efx->type->test_nvram) { 116 rc = efx->type->test_nvram(efx); 117 if (rc == -EPERM) 118 rc = 0; 119 else 120 tests->nvram = rc ? -1 : 1; 121 } 122 123 return rc; 124 } 125 126 /************************************************************************** 127 * 128 * Interrupt and event queue testing 129 * 130 **************************************************************************/ 131 132 /* Test generation and receipt of interrupts */ 133 static int efx_test_interrupts(struct efx_nic *efx, 134 struct efx_self_tests *tests) 135 { 136 unsigned long timeout, wait; 137 int cpu; 138 139 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 140 tests->interrupt = -1; 141 142 efx_nic_irq_test_start(efx); 143 timeout = jiffies + IRQ_TIMEOUT; 144 wait = 1; 145 146 /* Wait for arrival of test interrupt. */ 147 netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); 148 do { 149 schedule_timeout_uninterruptible(wait); 150 cpu = efx_nic_irq_test_irq_cpu(efx); 151 if (cpu >= 0) 152 goto success; 153 wait *= 2; 154 } while (time_before(jiffies, timeout)); 155 156 netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); 157 return -ETIMEDOUT; 158 159 success: 160 netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", 161 INT_MODE(efx), cpu); 162 tests->interrupt = 1; 163 return 0; 164 } 165 166 /* Test generation and receipt of interrupting events */ 167 static int efx_test_eventq_irq(struct efx_nic *efx, 168 struct efx_self_tests *tests) 169 { 170 struct efx_channel *channel; 171 unsigned int read_ptr[EFX_MAX_CHANNELS]; 172 unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0; 173 unsigned long timeout, wait; 174 175 BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG); 176 177 efx_for_each_channel(channel, efx) { 178 read_ptr[channel->channel] = channel->eventq_read_ptr; 179 set_bit(channel->channel, &dma_pend); 180 set_bit(channel->channel, &int_pend); 181 efx_nic_event_test_start(channel); 182 } 183 184 timeout = jiffies + IRQ_TIMEOUT; 185 wait = 1; 186 187 /* Wait for arrival of interrupts. NAPI processing may or may 188 * not complete in time, but we can cope in any case. 189 */ 190 do { 191 schedule_timeout_uninterruptible(wait); 192 193 efx_for_each_channel(channel, efx) { 194 efx_stop_eventq(channel); 195 if (channel->eventq_read_ptr != 196 read_ptr[channel->channel]) { 197 set_bit(channel->channel, &napi_ran); 198 clear_bit(channel->channel, &dma_pend); 199 clear_bit(channel->channel, &int_pend); 200 } else { 201 if (efx_nic_event_present(channel)) 202 clear_bit(channel->channel, &dma_pend); 203 if (efx_nic_event_test_irq_cpu(channel) >= 0) 204 clear_bit(channel->channel, &int_pend); 205 } 206 efx_start_eventq(channel); 207 } 208 209 wait *= 2; 210 } while ((dma_pend || int_pend) && time_before(jiffies, timeout)); 211 212 efx_for_each_channel(channel, efx) { 213 bool dma_seen = !test_bit(channel->channel, &dma_pend); 214 bool int_seen = !test_bit(channel->channel, &int_pend); 215 216 tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; 217 tests->eventq_int[channel->channel] = int_seen ? 1 : -1; 218 219 if (dma_seen && int_seen) { 220 netif_dbg(efx, drv, efx->net_dev, 221 "channel %d event queue passed (with%s NAPI)\n", 222 channel->channel, 223 test_bit(channel->channel, &napi_ran) ? 224 "" : "out"); 225 } else { 226 /* Report failure and whether either interrupt or DMA 227 * worked 228 */ 229 netif_err(efx, drv, efx->net_dev, 230 "channel %d timed out waiting for event queue\n", 231 channel->channel); 232 if (int_seen) 233 netif_err(efx, drv, efx->net_dev, 234 "channel %d saw interrupt " 235 "during event queue test\n", 236 channel->channel); 237 if (dma_seen) 238 netif_err(efx, drv, efx->net_dev, 239 "channel %d event was generated, but " 240 "failed to trigger an interrupt\n", 241 channel->channel); 242 } 243 } 244 245 return (dma_pend || int_pend) ? -ETIMEDOUT : 0; 246 } 247 248 static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, 249 unsigned flags) 250 { 251 int rc; 252 253 if (!efx->phy_op->run_tests) 254 return 0; 255 256 mutex_lock(&efx->mac_lock); 257 rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); 258 mutex_unlock(&efx->mac_lock); 259 if (rc == -EPERM) 260 rc = 0; 261 else 262 netif_info(efx, drv, efx->net_dev, 263 "%s phy selftest\n", rc ? "Failed" : "Passed"); 264 265 return rc; 266 } 267 268 /************************************************************************** 269 * 270 * Loopback testing 271 * NB Only one loopback test can be executing concurrently. 272 * 273 **************************************************************************/ 274 275 /* Loopback test RX callback 276 * This is called for each received packet during loopback testing. 277 */ 278 void efx_loopback_rx_packet(struct efx_nic *efx, 279 const char *buf_ptr, int pkt_len) 280 { 281 struct efx_loopback_state *state = efx->loopback_selftest; 282 struct efx_loopback_payload *received; 283 struct efx_loopback_payload *payload; 284 285 BUG_ON(!buf_ptr); 286 287 /* If we are just flushing, then drop the packet */ 288 if ((state == NULL) || state->flush) 289 return; 290 291 payload = &state->payload; 292 293 received = (struct efx_loopback_payload *) buf_ptr; 294 received->ip.saddr = payload->ip.saddr; 295 if (state->offload_csum) 296 received->ip.check = payload->ip.check; 297 298 /* Check that header exists */ 299 if (pkt_len < sizeof(received->header)) { 300 netif_err(efx, drv, efx->net_dev, 301 "saw runt RX packet (length %d) in %s loopback " 302 "test\n", pkt_len, LOOPBACK_MODE(efx)); 303 goto err; 304 } 305 306 /* Check that the ethernet header exists */ 307 if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { 308 netif_err(efx, drv, efx->net_dev, 309 "saw non-loopback RX packet in %s loopback test\n", 310 LOOPBACK_MODE(efx)); 311 goto err; 312 } 313 314 /* Check packet length */ 315 if (pkt_len != sizeof(*payload)) { 316 netif_err(efx, drv, efx->net_dev, 317 "saw incorrect RX packet length %d (wanted %d) in " 318 "%s loopback test\n", pkt_len, (int)sizeof(*payload), 319 LOOPBACK_MODE(efx)); 320 goto err; 321 } 322 323 /* Check that IP header matches */ 324 if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { 325 netif_err(efx, drv, efx->net_dev, 326 "saw corrupted IP header in %s loopback test\n", 327 LOOPBACK_MODE(efx)); 328 goto err; 329 } 330 331 /* Check that msg and padding matches */ 332 if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { 333 netif_err(efx, drv, efx->net_dev, 334 "saw corrupted RX packet in %s loopback test\n", 335 LOOPBACK_MODE(efx)); 336 goto err; 337 } 338 339 /* Check that iteration matches */ 340 if (received->iteration != payload->iteration) { 341 netif_err(efx, drv, efx->net_dev, 342 "saw RX packet from iteration %d (wanted %d) in " 343 "%s loopback test\n", ntohs(received->iteration), 344 ntohs(payload->iteration), LOOPBACK_MODE(efx)); 345 goto err; 346 } 347 348 /* Increase correct RX count */ 349 netif_vdbg(efx, drv, efx->net_dev, 350 "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); 351 352 atomic_inc(&state->rx_good); 353 return; 354 355 err: 356 #ifdef DEBUG 357 if (atomic_read(&state->rx_bad) == 0) { 358 netif_err(efx, drv, efx->net_dev, "received packet:\n"); 359 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 360 buf_ptr, pkt_len, 0); 361 netif_err(efx, drv, efx->net_dev, "expected packet:\n"); 362 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 363 &state->payload, sizeof(state->payload), 0); 364 } 365 #endif 366 atomic_inc(&state->rx_bad); 367 } 368 369 /* Initialise an efx_selftest_state for a new iteration */ 370 static void efx_iterate_state(struct efx_nic *efx) 371 { 372 struct efx_loopback_state *state = efx->loopback_selftest; 373 struct net_device *net_dev = efx->net_dev; 374 struct efx_loopback_payload *payload = &state->payload; 375 376 /* Initialise the layerII header */ 377 ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); 378 ether_addr_copy((u8 *)&payload->header.h_source, payload_source); 379 payload->header.h_proto = htons(ETH_P_IP); 380 381 /* saddr set later and used as incrementing count */ 382 payload->ip.daddr = htonl(INADDR_LOOPBACK); 383 payload->ip.ihl = 5; 384 payload->ip.check = (__force __sum16) htons(0xdead); 385 payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); 386 payload->ip.version = IPVERSION; 387 payload->ip.protocol = IPPROTO_UDP; 388 389 /* Initialise udp header */ 390 payload->udp.source = 0; 391 payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - 392 sizeof(struct iphdr)); 393 payload->udp.check = 0; /* checksum ignored */ 394 395 /* Fill out payload */ 396 payload->iteration = htons(ntohs(payload->iteration) + 1); 397 memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); 398 399 /* Fill out remaining state members */ 400 atomic_set(&state->rx_good, 0); 401 atomic_set(&state->rx_bad, 0); 402 smp_wmb(); 403 } 404 405 static int efx_begin_loopback(struct efx_tx_queue *tx_queue) 406 { 407 struct efx_nic *efx = tx_queue->efx; 408 struct efx_loopback_state *state = efx->loopback_selftest; 409 struct efx_loopback_payload *payload; 410 struct sk_buff *skb; 411 int i; 412 netdev_tx_t rc; 413 414 /* Transmit N copies of buffer */ 415 for (i = 0; i < state->packet_count; i++) { 416 /* Allocate an skb, holding an extra reference for 417 * transmit completion counting */ 418 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 419 if (!skb) 420 return -ENOMEM; 421 state->skbs[i] = skb; 422 skb_get(skb); 423 424 /* Copy the payload in, incrementing the source address to 425 * exercise the rss vectors */ 426 payload = ((struct efx_loopback_payload *) 427 skb_put(skb, sizeof(state->payload))); 428 memcpy(payload, &state->payload, sizeof(state->payload)); 429 payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); 430 431 /* Ensure everything we've written is visible to the 432 * interrupt handler. */ 433 smp_wmb(); 434 435 netif_tx_lock_bh(efx->net_dev); 436 rc = efx_enqueue_skb(tx_queue, skb); 437 netif_tx_unlock_bh(efx->net_dev); 438 439 if (rc != NETDEV_TX_OK) { 440 netif_err(efx, drv, efx->net_dev, 441 "TX queue %d could not transmit packet %d of " 442 "%d in %s loopback test\n", tx_queue->queue, 443 i + 1, state->packet_count, 444 LOOPBACK_MODE(efx)); 445 446 /* Defer cleaning up the other skbs for the caller */ 447 kfree_skb(skb); 448 return -EPIPE; 449 } 450 } 451 452 return 0; 453 } 454 455 static int efx_poll_loopback(struct efx_nic *efx) 456 { 457 struct efx_loopback_state *state = efx->loopback_selftest; 458 459 return atomic_read(&state->rx_good) == state->packet_count; 460 } 461 462 static int efx_end_loopback(struct efx_tx_queue *tx_queue, 463 struct efx_loopback_self_tests *lb_tests) 464 { 465 struct efx_nic *efx = tx_queue->efx; 466 struct efx_loopback_state *state = efx->loopback_selftest; 467 struct sk_buff *skb; 468 int tx_done = 0, rx_good, rx_bad; 469 int i, rc = 0; 470 471 netif_tx_lock_bh(efx->net_dev); 472 473 /* Count the number of tx completions, and decrement the refcnt. Any 474 * skbs not already completed will be free'd when the queue is flushed */ 475 for (i = 0; i < state->packet_count; i++) { 476 skb = state->skbs[i]; 477 if (skb && !skb_shared(skb)) 478 ++tx_done; 479 dev_kfree_skb(skb); 480 } 481 482 netif_tx_unlock_bh(efx->net_dev); 483 484 /* Check TX completion and received packet counts */ 485 rx_good = atomic_read(&state->rx_good); 486 rx_bad = atomic_read(&state->rx_bad); 487 if (tx_done != state->packet_count) { 488 /* Don't free the skbs; they will be picked up on TX 489 * overflow or channel teardown. 490 */ 491 netif_err(efx, drv, efx->net_dev, 492 "TX queue %d saw only %d out of an expected %d " 493 "TX completion events in %s loopback test\n", 494 tx_queue->queue, tx_done, state->packet_count, 495 LOOPBACK_MODE(efx)); 496 rc = -ETIMEDOUT; 497 /* Allow to fall through so we see the RX errors as well */ 498 } 499 500 /* We may always be up to a flush away from our desired packet total */ 501 if (rx_good != state->packet_count) { 502 netif_dbg(efx, drv, efx->net_dev, 503 "TX queue %d saw only %d out of an expected %d " 504 "received packets in %s loopback test\n", 505 tx_queue->queue, rx_good, state->packet_count, 506 LOOPBACK_MODE(efx)); 507 rc = -ETIMEDOUT; 508 /* Fall through */ 509 } 510 511 /* Update loopback test structure */ 512 lb_tests->tx_sent[tx_queue->queue] += state->packet_count; 513 lb_tests->tx_done[tx_queue->queue] += tx_done; 514 lb_tests->rx_good += rx_good; 515 lb_tests->rx_bad += rx_bad; 516 517 return rc; 518 } 519 520 static int 521 efx_test_loopback(struct efx_tx_queue *tx_queue, 522 struct efx_loopback_self_tests *lb_tests) 523 { 524 struct efx_nic *efx = tx_queue->efx; 525 struct efx_loopback_state *state = efx->loopback_selftest; 526 int i, begin_rc, end_rc; 527 528 for (i = 0; i < 3; i++) { 529 /* Determine how many packets to send */ 530 state->packet_count = efx->txq_entries / 3; 531 state->packet_count = min(1 << (i << 2), state->packet_count); 532 state->skbs = kcalloc(state->packet_count, 533 sizeof(state->skbs[0]), GFP_KERNEL); 534 if (!state->skbs) 535 return -ENOMEM; 536 state->flush = false; 537 538 netif_dbg(efx, drv, efx->net_dev, 539 "TX queue %d testing %s loopback with %d packets\n", 540 tx_queue->queue, LOOPBACK_MODE(efx), 541 state->packet_count); 542 543 efx_iterate_state(efx); 544 begin_rc = efx_begin_loopback(tx_queue); 545 546 /* This will normally complete very quickly, but be 547 * prepared to wait much longer. */ 548 msleep(1); 549 if (!efx_poll_loopback(efx)) { 550 msleep(LOOPBACK_TIMEOUT_MS); 551 efx_poll_loopback(efx); 552 } 553 554 end_rc = efx_end_loopback(tx_queue, lb_tests); 555 kfree(state->skbs); 556 557 if (begin_rc || end_rc) { 558 /* Wait a while to ensure there are no packets 559 * floating around after a failure. */ 560 schedule_timeout_uninterruptible(HZ / 10); 561 return begin_rc ? begin_rc : end_rc; 562 } 563 } 564 565 netif_dbg(efx, drv, efx->net_dev, 566 "TX queue %d passed %s loopback test with a burst length " 567 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 568 state->packet_count); 569 570 return 0; 571 } 572 573 /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but 574 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it 575 * to delay and retry. Therefore, it's safer to just poll directly. Wait 576 * for link up and any faults to dissipate. */ 577 static int efx_wait_for_link(struct efx_nic *efx) 578 { 579 struct efx_link_state *link_state = &efx->link_state; 580 int count, link_up_count = 0; 581 bool link_up; 582 583 for (count = 0; count < 40; count++) { 584 schedule_timeout_uninterruptible(HZ / 10); 585 586 if (efx->type->monitor != NULL) { 587 mutex_lock(&efx->mac_lock); 588 efx->type->monitor(efx); 589 mutex_unlock(&efx->mac_lock); 590 } 591 592 mutex_lock(&efx->mac_lock); 593 link_up = link_state->up; 594 if (link_up) 595 link_up = !efx->type->check_mac_fault(efx); 596 mutex_unlock(&efx->mac_lock); 597 598 if (link_up) { 599 if (++link_up_count == 2) 600 return 0; 601 } else { 602 link_up_count = 0; 603 } 604 } 605 606 return -ETIMEDOUT; 607 } 608 609 static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, 610 unsigned int loopback_modes) 611 { 612 enum efx_loopback_mode mode; 613 struct efx_loopback_state *state; 614 struct efx_channel *channel = 615 efx_get_channel(efx, efx->tx_channel_offset); 616 struct efx_tx_queue *tx_queue; 617 int rc = 0; 618 619 /* Set the port loopback_selftest member. From this point on 620 * all received packets will be dropped. Mark the state as 621 * "flushing" so all inflight packets are dropped */ 622 state = kzalloc(sizeof(*state), GFP_KERNEL); 623 if (state == NULL) 624 return -ENOMEM; 625 BUG_ON(efx->loopback_selftest); 626 state->flush = true; 627 efx->loopback_selftest = state; 628 629 /* Test all supported loopback modes */ 630 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { 631 if (!(loopback_modes & (1 << mode))) 632 continue; 633 634 /* Move the port into the specified loopback mode. */ 635 state->flush = true; 636 mutex_lock(&efx->mac_lock); 637 efx->loopback_mode = mode; 638 rc = __efx_reconfigure_port(efx); 639 mutex_unlock(&efx->mac_lock); 640 if (rc) { 641 netif_err(efx, drv, efx->net_dev, 642 "unable to move into %s loopback\n", 643 LOOPBACK_MODE(efx)); 644 goto out; 645 } 646 647 rc = efx_wait_for_link(efx); 648 if (rc) { 649 netif_err(efx, drv, efx->net_dev, 650 "loopback %s never came up\n", 651 LOOPBACK_MODE(efx)); 652 goto out; 653 } 654 655 /* Test all enabled types of TX queue */ 656 efx_for_each_channel_tx_queue(tx_queue, channel) { 657 state->offload_csum = (tx_queue->queue & 658 EFX_TXQ_TYPE_OFFLOAD); 659 rc = efx_test_loopback(tx_queue, 660 &tests->loopback[mode]); 661 if (rc) 662 goto out; 663 } 664 } 665 666 out: 667 /* Remove the flush. The caller will remove the loopback setting */ 668 state->flush = true; 669 efx->loopback_selftest = NULL; 670 wmb(); 671 kfree(state); 672 673 if (rc == -EPERM) 674 rc = 0; 675 676 return rc; 677 } 678 679 /************************************************************************** 680 * 681 * Entry point 682 * 683 *************************************************************************/ 684 685 int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, 686 unsigned flags) 687 { 688 enum efx_loopback_mode loopback_mode = efx->loopback_mode; 689 int phy_mode = efx->phy_mode; 690 int rc_test = 0, rc_reset, rc; 691 692 efx_selftest_async_cancel(efx); 693 694 /* Online (i.e. non-disruptive) testing 695 * This checks interrupt generation, event delivery and PHY presence. */ 696 697 rc = efx_test_phy_alive(efx, tests); 698 if (rc && !rc_test) 699 rc_test = rc; 700 701 rc = efx_test_nvram(efx, tests); 702 if (rc && !rc_test) 703 rc_test = rc; 704 705 rc = efx_test_interrupts(efx, tests); 706 if (rc && !rc_test) 707 rc_test = rc; 708 709 rc = efx_test_eventq_irq(efx, tests); 710 if (rc && !rc_test) 711 rc_test = rc; 712 713 if (rc_test) 714 return rc_test; 715 716 if (!(flags & ETH_TEST_FL_OFFLINE)) 717 return efx_test_phy(efx, tests, flags); 718 719 /* Offline (i.e. disruptive) testing 720 * This checks MAC and PHY loopback on the specified port. */ 721 722 /* Detach the device so the kernel doesn't transmit during the 723 * loopback test and the watchdog timeout doesn't fire. 724 */ 725 efx_device_detach_sync(efx); 726 727 if (efx->type->test_chip) { 728 rc_reset = efx->type->test_chip(efx, tests); 729 if (rc_reset) { 730 netif_err(efx, hw, efx->net_dev, 731 "Unable to recover from chip test\n"); 732 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 733 return rc_reset; 734 } 735 736 if ((tests->memory < 0 || tests->registers < 0) && !rc_test) 737 rc_test = -EIO; 738 } 739 740 /* Ensure that the phy is powered and out of loopback 741 * for the bist and loopback tests */ 742 mutex_lock(&efx->mac_lock); 743 efx->phy_mode &= ~PHY_MODE_LOW_POWER; 744 efx->loopback_mode = LOOPBACK_NONE; 745 __efx_reconfigure_port(efx); 746 mutex_unlock(&efx->mac_lock); 747 748 rc = efx_test_phy(efx, tests, flags); 749 if (rc && !rc_test) 750 rc_test = rc; 751 752 rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); 753 if (rc && !rc_test) 754 rc_test = rc; 755 756 /* restore the PHY to the previous state */ 757 mutex_lock(&efx->mac_lock); 758 efx->phy_mode = phy_mode; 759 efx->loopback_mode = loopback_mode; 760 __efx_reconfigure_port(efx); 761 mutex_unlock(&efx->mac_lock); 762 763 netif_device_attach(efx->net_dev); 764 765 return rc_test; 766 } 767 768 void efx_selftest_async_start(struct efx_nic *efx) 769 { 770 struct efx_channel *channel; 771 772 efx_for_each_channel(channel, efx) 773 efx_nic_event_test_start(channel); 774 schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); 775 } 776 777 void efx_selftest_async_cancel(struct efx_nic *efx) 778 { 779 cancel_delayed_work_sync(&efx->selftest_work); 780 } 781 782 void efx_selftest_async_work(struct work_struct *data) 783 { 784 struct efx_nic *efx = container_of(data, struct efx_nic, 785 selftest_work.work); 786 struct efx_channel *channel; 787 int cpu; 788 789 efx_for_each_channel(channel, efx) { 790 cpu = efx_nic_event_test_irq_cpu(channel); 791 if (cpu < 0) 792 netif_err(efx, ifup, efx->net_dev, 793 "channel %d failed to trigger an interrupt\n", 794 channel->channel); 795 else 796 netif_dbg(efx, ifup, efx->net_dev, 797 "channel %d triggered interrupt on CPU %d\n", 798 channel->channel, cpu); 799 } 800 } 801