// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	char pad[2]; /* Ensures ip is 4-byte aligned */
	struct_group_attr(packet, __packed,
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	char msg[64];
	);
} __packed __aligned(4);
#define EFX_LOOPBACK_PAYLOAD_LEN	\
	sizeof_field(struct efx_loopback_payload, packet)

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_siena_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)

/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in efx_siena_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

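/* Each test below records its outcome in struct efx_self_tests: 1 means the
 * test passed, -1 means it failed and 0 means it was not run (e.g. because
 * it is not supported on this hardware).
 */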

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	rc = efx_siena_mcdi_phy_test_alive(efx);
	tests->phy_alive = rc ? -1 : 1;

	return rc;
}

static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = efx_siena_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

	efx_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_siena_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			efx_siena_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_siena_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_siena_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

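	/* Evaluate the results: a channel passes only if its test event was
	 * delivered (dma_pend bit clear) and an interrupt was raised
	 * (int_pend bit clear); napi_ran records whether NAPI consumed the
	 * event before we looked.
	 */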
	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_siena_loopback_rx_packet(struct efx_nic *efx,
				  const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

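	/* Copy the packet into a local buffer, then overwrite the two fields
	 * that legitimately differ from the stored template: the IP source
	 * address is used as a per-packet counter, and the IP checksum may
	 * have been rewritten by checksum offload.
	 */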
	memcpy(&received.packet, buf_ptr,
	       min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN));
	received.ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received.ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received.header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received.header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != EFX_LOOPBACK_PAYLOAD_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len,
			  (int)EFX_LOOPBACK_PAYLOAD_LEN, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received.ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding match */
	if (memcmp(&received.msg, &payload->msg, sizeof(received.msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received.iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received.iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload.packet, EFX_LOOPBACK_PAYLOAD_LEN,
			       0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise the loopback state for a new test iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layer II header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) -
				    offsetof(struct efx_loopback_payload, ip));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) -
				 offsetof(struct efx_loopback_payload, udp));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

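/* Transmit state->packet_count copies of the test payload.  An extra
 * reference is held on each skb so that efx_end_loopback() can tell from
 * the reference count whether TX completion has freed it yet.
 */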
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
		/* Strip off the leading padding */
		skb_pull(skb, offsetof(struct efx_loopback_payload, header));
		/* Strip off the trailing padding */
		skb_trim(skb, EFX_LOOPBACK_PAYLOAD_LEN);

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->label,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

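/* Drop the extra skb references taken in efx_begin_loopback(), counting how
 * many packets TX completion has already freed, then compare the TX and RX
 * counts against the number of packets sent.
 */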
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be freed when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->label, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->label, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
	lb_tests->tx_done[tx_queue->label] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send: bursts of 1, 16 and
		 * 256 packets, capped at a third of the TX queue size */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
			  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

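/* Run the loopback test for every mode set in loopback_modes, using each
 * TX queue of the first TX-capable channel.
 */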
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_siena_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->type &
					       EFX_TXQ_TYPE_OUTER_CSUM);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		       unsigned int flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	efx_siena_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	efx_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__efx_siena_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_siena_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_device_attach_if_not_resetting(efx);

	return rc_test;
}

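/* Asynchronous event self-test: kick off an event test on every channel and
 * schedule selftest_work to report, after IRQ_TIMEOUT, which channels
 * actually raised an interrupt.
 */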
void efx_siena_selftest_async_start(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_siena_selftest_async_cancel(struct efx_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

static void efx_siena_selftest_async_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   selftest_work.work);
	struct efx_channel *channel;
	int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = efx_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}

void efx_siena_selftest_async_init(struct efx_nic *efx)
{
	INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work);
}