// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector.
 */
struct efx_loopback_payload {
	char pad[2];		/* Ensures ip is 4-byte aligned */
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	char msg[64];
} __packed __aligned(4);
#define EFX_LOOPBACK_PAYLOAD_LEN	(sizeof(struct efx_loopback_payload) - \
					 offsetof(struct efx_loopback_payload, \
						  header))

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)

/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in efx_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	rc = efx_mcdi_phy_test_alive(efx);
	tests->phy_alive = rc ? -1 : 1;

	return rc;
}

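/* NVRAM testing is optional: boards without a test_nvram method skip it, and
 * -EPERM from the MC is treated as "test not run" (tests->nvram is left
 * untouched) rather than as a failure -- presumably so that functions which
 * are not permitted to run the test do not report spurious errors.
 */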
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = efx_nic_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

	efx_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_nic_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			efx_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_nic_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

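		/* A channel passes when NAPI has consumed events (implying
		 * both event DMA and the interrupt worked) or when the event
		 * and the test IRQ were each seen directly; otherwise the
		 * messages below narrow down which half failed.
		 */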
		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	memcpy(&received.header, buf_ptr,
	       min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN));
	received.ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received.ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received.header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received.header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != EFX_LOOPBACK_PAYLOAD_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len,
			  (int)EFX_LOOPBACK_PAYLOAD_LEN, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received.ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received.msg, &payload->msg, sizeof(received.msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received.iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received.iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload.header, EFX_LOOPBACK_PAYLOAD_LEN,
			       0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layer II header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) -
				    offsetof(struct efx_loopback_payload, ip));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) -
				 offsetof(struct efx_loopback_payload, udp));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
		/* Strip off the leading padding */
		skb_pull(skb, offsetof(struct efx_loopback_payload, header));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->label,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->label, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->label, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
	lb_tests->tx_done[tx_queue->label] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

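/* Run three bursts per loopback mode: min(1 << (i << 2), txq_entries / 3)
 * gives burst sizes of 1, 16 and 256 packets, capped by the TX ring size.
 */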
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
			  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

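/* state->flush is set whenever the port is being reconfigured into a new
 * loopback mode, so efx_loopback_rx_packet() drops any packets still in
 * flight from the previous mode instead of counting them.
 */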
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->type &
					       EFX_TXQ_TYPE_OUTER_CSUM);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	efx_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	efx_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			efx_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_device_attach_if_not_resetting(efx);

	return rc_test;
}

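/* The async variant starts an event test on every channel and checks the
 * result from delayed work IRQ_TIMEOUT later, so interrupt delivery can be
 * verified without blocking the caller (the messages below use the ifup log
 * category, suggesting this is run at interface bring-up).
 */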
void efx_selftest_async_start(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_nic_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_selftest_async_cancel(struct efx_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

static void efx_selftest_async_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   selftest_work.work);
	struct efx_channel *channel;
	int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = efx_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}

void efx_selftest_async_init(struct efx_nic *efx)
{
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
}