/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>
#include <linux/aer.h>

#include "fm10k.h"

/* map fm10k_device_* enum values to the matching MAC-specific info blocks */
static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

/**
 * fm10k_read_pci_cfg_word - read a word from PCI config space
 * @hw: pointer to the HW structure
 * @reg: config space offset to read
 *
 * Returns the config word, or all-ones if the device has been detached
 * (a surprise-removed PCIe device reads as all-ones).
 **/
u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	/* a detached device reads as all-ones; mirror that for callers */
	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
	/* all-ones suggests the device may be gone; flush to trigger the
	 * removal detection in fm10k_read_reg()
	 */
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

/**
 * fm10k_read_reg - read a BAR register, detecting surprise removal
 * @hw: pointer to the HW structure
 * @reg: register index to read
 *
 * Returns the register value; on an all-ones read that is confirmed by
 * register 0 also reading all-ones, the device is considered removed:
 * hw_addr is cleared and the netdev is detached.
 **/
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	/* NOTE(review): ACCESS_ONCE() is the legacy spelling; newer kernels
	 * use READ_ONCE() for the same single-read guarantee.
	 */
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
	/* all-ones from a non-zero register is re-checked against register 0
	 * to distinguish a legitimately all-ones value from a lost device
	 */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

/* flush posted writes, then report -ENODEV if the device has been removed */
static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

/* queue the service task unless it is disabled or already scheduled */
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
		queue_work(fm10k_workqueue, &interface->service_task);
}

/* release the SERVICE_SCHED bit so the next event can be scheduled */
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @data: pointer to interface cast into an unsigned long
 **/
static void fm10k_service_timer(unsigned long data)
{
	struct fm10k_intfc *interface = (struct fm10k_intfc *)data;

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

/* close the netdev once fm10k_read_reg() has declared the device removed */
static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* do nothing if device is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	rtnl_lock();

	if (netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();
}

/**
 * fm10k_reinit - full reset/reinit of the interface under RTNL
 * @interface: board private structure
 *
 * Tears down queues, IRQs and IOV, resets and re-initializes the hardware,
 * then rebuilds everything. On any failure the netdev is detached.
 **/
static void fm10k_reinit(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout
	 * NOTE(review): direct trans_start writes were later replaced by
	 * netif_trans_update() in newer kernels
	 */
	netdev->trans_start = jiffies;

	/* serialize against any other reset in flight */
	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* reassociate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		/* the PF may have changed the VLAN-override policy */
		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	/* reset clock */
	fm10k_ts_reset(interface);

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);

	return;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);
}

/* run fm10k_reinit() if a reset has been requested via interface->flags */
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");

	fm10k_reinit(interface);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* honor a forced link-down window before polling host state again */
	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	/* opportunistically reapply the SWPRI map; if the RTNL is busy the
	 * flag stays set and we retry on the next watchdog pass
	 */
	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	/* local accumulators so partially-summed values are never visible */
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	/* pull hardware counters into interface->stats */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface - pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface - pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface - pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);
	fm10k_ts_tx_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled; gives up after ~10-20ms */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
	u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	/* drop on empty only for traffic classes with pause disabled */
	if (!(rx_pause & (1 << ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;

#endif
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & (1 << ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

/* enable NAPI polling on every allocated q_vector */
static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

/* per-queue MSI-X handler: defer ring cleanup to NAPI */
static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/* VF mailbox MSI-X handler */
static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
					    hw->mac.itr_scale));

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	/* request host-state revalidation from the service task */
	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
/* expand a fault-type constant into a case label that stringifies it */
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		/* fault->func is 1-based for VFs; 0 means the PF itself */
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

/* walk the fault bits in EICR and log/handle each reported fault */
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

/* re-enable any queues that hardware disabled due to Rx overrun, walking
 * the MAXHOLDQ bitmap registers from queue 255 down to 0
 */
static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	/* read-then-write clears the sticky per-queue bits */
	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
	for (q = 255;;) {
		/* NOTE(review): (1 << 31) shifts into the sign bit of a
		 * signed int; BIT(31) / (1u << 31) would be the safe idiom
		 */
		if (maxholdq & (1 << 31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		/* shift the next bit into the MSB position */
		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		/* crossed a 32-queue boundary: fetch the next bitmap word */
		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}

/* PF mailbox MSI-X handler: faults, overruns, mailboxes, switch state */
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		/* handle VFLRE events */
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, &interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = true;

	/* validate host state, and handle VF mailboxes in the service task */
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
					    hw->mac.itr_scale));

	return IRQ_HANDLED;
}

/* free the mailbox IRQ and mask its interrupt causes */
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	/* NOTE(review): computing this address before the NULL check below
	 * relies on msix_entries being non-NULL; consider moving the
	 * initializer after the early return - confirm against upstream.
	 */
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct fm10k_hw *hw = &interface->hw;
	int itr_reg;

	/* no mailbox IRQ to free if MSI-X is not enabled */
	if (!interface->msix_entries)
		return;

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}

/* VF handler for MAC/VLAN messages from the PF; requests a reset when the
 * MAC address, VLAN override, or default VID changed
 */
static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	bool vlan_override = hw->mac.vlan_override;
	u16 default_vid = hw->mac.default_vid;
	struct fm10k_intfc *interface;
	s32 err;

	err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* MAC was changed so we need reset */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* VLAN override was changed, or default VLAN changed */
	if ((vlan_override != hw->mac.vlan_override) ||
	    (default_vid != hw->mac.default_vid))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	return 0;
}

static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u64 timestamp;
	s32
err; 1211 1212 err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP], 1213 ×tamp); 1214 if (err) 1215 return err; 1216 1217 interface = container_of(hw, struct fm10k_intfc, hw); 1218 1219 fm10k_ts_tx_hwtstamp(interface, 0, timestamp); 1220 1221 return 0; 1222 } 1223 1224 /* generic error handler for mailbox issues */ 1225 static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, 1226 struct fm10k_mbx_info __always_unused *mbx) 1227 { 1228 struct fm10k_intfc *interface; 1229 struct pci_dev *pdev; 1230 1231 interface = container_of(hw, struct fm10k_intfc, hw); 1232 pdev = interface->pdev; 1233 1234 dev_err(&pdev->dev, "Unknown message ID %u\n", 1235 **results & FM10K_TLV_ID_MASK); 1236 1237 return 0; 1238 } 1239 1240 static const struct fm10k_msg_data vf_mbx_data[] = { 1241 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), 1242 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), 1243 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), 1244 FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf), 1245 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), 1246 }; 1247 1248 static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) 1249 { 1250 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; 1251 struct net_device *dev = interface->netdev; 1252 struct fm10k_hw *hw = &interface->hw; 1253 int err; 1254 1255 /* Use timer0 for interrupt moderation on the mailbox */ 1256 u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; 1257 1258 /* register mailbox handlers */ 1259 err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); 1260 if (err) 1261 return err; 1262 1263 /* request the IRQ */ 1264 err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0, 1265 dev->name, interface); 1266 if (err) { 1267 netif_err(interface, probe, dev, 1268 "request_irq for msix_mbx failed: %d\n", err); 1269 return err; 1270 } 1271 1272 /* map all of the interrupt sources */ 1273 fm10k_write_reg(hw, FM10K_VFINT_MAP, itr); 1274 1275 /* enable interrupt */ 1276 
fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE); 1277 1278 return 0; 1279 } 1280 1281 static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, 1282 struct fm10k_mbx_info *mbx) 1283 { 1284 struct fm10k_intfc *interface; 1285 u32 dglort_map = hw->mac.dglort_map; 1286 s32 err; 1287 1288 err = fm10k_msg_lport_map_pf(hw, results, mbx); 1289 if (err) 1290 return err; 1291 1292 interface = container_of(hw, struct fm10k_intfc, hw); 1293 1294 /* we need to reset if port count was just updated */ 1295 if (dglort_map != hw->mac.dglort_map) 1296 interface->flags |= FM10K_FLAG_RESET_REQUESTED; 1297 1298 return 0; 1299 } 1300 1301 static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, 1302 struct fm10k_mbx_info __always_unused *mbx) 1303 { 1304 struct fm10k_intfc *interface; 1305 u16 glort, pvid; 1306 u32 pvid_update; 1307 s32 err; 1308 1309 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], 1310 &pvid_update); 1311 if (err) 1312 return err; 1313 1314 /* extract values from the pvid update */ 1315 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); 1316 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); 1317 1318 /* if glort is not valid return error */ 1319 if (!fm10k_glort_valid_pf(hw, glort)) 1320 return FM10K_ERR_PARAM; 1321 1322 /* verify VLAN ID is valid */ 1323 if (pvid >= FM10K_VLAN_TABLE_VID_MAX) 1324 return FM10K_ERR_PARAM; 1325 1326 interface = container_of(hw, struct fm10k_intfc, hw); 1327 1328 /* check to see if this belongs to one of the VFs */ 1329 err = fm10k_iov_update_pvid(interface, glort, pvid); 1330 if (!err) 1331 return 0; 1332 1333 /* we need to reset if default VLAN was just updated */ 1334 if (pvid != hw->mac.default_vid) 1335 interface->flags |= FM10K_FLAG_RESET_REQUESTED; 1336 1337 hw->mac.default_vid = pvid; 1338 1339 return 0; 1340 } 1341 1342 static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, 1343 struct fm10k_mbx_info __always_unused *mbx) 1344 { 1345 struct 
fm10k_swapi_1588_timestamp timestamp; 1346 struct fm10k_iov_data *iov_data; 1347 struct fm10k_intfc *interface; 1348 u16 sglort, vf_idx; 1349 s32 err; 1350 1351 err = fm10k_tlv_attr_get_le_struct( 1352 results[FM10K_PF_ATTR_ID_1588_TIMESTAMP], 1353 ×tamp, sizeof(timestamp)); 1354 if (err) 1355 return err; 1356 1357 interface = container_of(hw, struct fm10k_intfc, hw); 1358 1359 if (timestamp.dglort) { 1360 fm10k_ts_tx_hwtstamp(interface, timestamp.dglort, 1361 le64_to_cpu(timestamp.egress)); 1362 return 0; 1363 } 1364 1365 /* either dglort or sglort must be set */ 1366 if (!timestamp.sglort) 1367 return FM10K_ERR_PARAM; 1368 1369 /* verify GLORT is at least one of the ones we own */ 1370 sglort = le16_to_cpu(timestamp.sglort); 1371 if (!fm10k_glort_valid_pf(hw, sglort)) 1372 return FM10K_ERR_PARAM; 1373 1374 if (sglort == interface->glort) { 1375 fm10k_ts_tx_hwtstamp(interface, 0, 1376 le64_to_cpu(timestamp.ingress)); 1377 return 0; 1378 } 1379 1380 /* if there is no iov_data then there is no mailboxes to process */ 1381 if (!ACCESS_ONCE(interface->iov_data)) 1382 return FM10K_ERR_PARAM; 1383 1384 rcu_read_lock(); 1385 1386 /* notify VF if this timestamp belongs to it */ 1387 iov_data = interface->iov_data; 1388 vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort; 1389 1390 if (!iov_data || vf_idx >= iov_data->num_vfs) { 1391 err = FM10K_ERR_PARAM; 1392 goto err_unlock; 1393 } 1394 1395 err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx], 1396 le64_to_cpu(timestamp.ingress)); 1397 1398 err_unlock: 1399 rcu_read_unlock(); 1400 1401 return err; 1402 } 1403 1404 static const struct fm10k_msg_data pf_mbx_data[] = { 1405 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), 1406 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), 1407 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map), 1408 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), 1409 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), 1410 
FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), 1411 FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf), 1412 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), 1413 }; 1414 1415 static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) 1416 { 1417 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; 1418 struct net_device *dev = interface->netdev; 1419 struct fm10k_hw *hw = &interface->hw; 1420 int err; 1421 1422 /* Use timer0 for interrupt moderation on the mailbox */ 1423 u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; 1424 u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; 1425 1426 /* register mailbox handlers */ 1427 err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); 1428 if (err) 1429 return err; 1430 1431 /* request the IRQ */ 1432 err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0, 1433 dev->name, interface); 1434 if (err) { 1435 netif_err(interface, probe, dev, 1436 "request_irq for msix_mbx failed: %d\n", err); 1437 return err; 1438 } 1439 1440 /* Enable interrupts w/ no moderation for "other" interrupts */ 1441 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr); 1442 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr); 1443 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr); 1444 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr); 1445 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr); 1446 1447 /* Enable interrupts w/ moderation for mailbox */ 1448 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr); 1449 1450 /* Enable individual interrupt causes */ 1451 fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | 1452 FM10K_EIMR_ENABLE(FUM_FAULT) | 1453 FM10K_EIMR_ENABLE(MAILBOX) | 1454 FM10K_EIMR_ENABLE(SWITCHREADY) | 1455 FM10K_EIMR_ENABLE(SWITCHNOTREADY) | 1456 FM10K_EIMR_ENABLE(SRAMERROR) | 1457 FM10K_EIMR_ENABLE(VFLR) | 1458 FM10K_EIMR_ENABLE(MAXHOLDTIME)); 1459 1460 /* enable interrupt */ 1461 
fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE); 1462 1463 return 0; 1464 } 1465 1466 int fm10k_mbx_request_irq(struct fm10k_intfc *interface) 1467 { 1468 struct fm10k_hw *hw = &interface->hw; 1469 int err; 1470 1471 /* enable Mailbox cause */ 1472 if (hw->mac.type == fm10k_mac_pf) 1473 err = fm10k_mbx_request_irq_pf(interface); 1474 else 1475 err = fm10k_mbx_request_irq_vf(interface); 1476 if (err) 1477 return err; 1478 1479 /* connect mailbox */ 1480 err = hw->mbx.ops.connect(hw, &hw->mbx); 1481 1482 /* if the mailbox failed to connect, then free IRQ */ 1483 if (err) 1484 fm10k_mbx_free_irq(interface); 1485 1486 return err; 1487 } 1488 1489 /** 1490 * fm10k_qv_free_irq - release interrupts associated with queue vectors 1491 * @interface: board private structure 1492 * 1493 * Release all interrupts associated with this interface 1494 **/ 1495 void fm10k_qv_free_irq(struct fm10k_intfc *interface) 1496 { 1497 int vector = interface->num_q_vectors; 1498 struct fm10k_hw *hw = &interface->hw; 1499 struct msix_entry *entry; 1500 1501 entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; 1502 1503 while (vector) { 1504 struct fm10k_q_vector *q_vector; 1505 1506 vector--; 1507 entry--; 1508 q_vector = interface->q_vector[vector]; 1509 1510 if (!q_vector->tx.count && !q_vector->rx.count) 1511 continue; 1512 1513 /* clear the affinity_mask in the IRQ descriptor */ 1514 irq_set_affinity_hint(entry->vector, NULL); 1515 1516 /* disable interrupts */ 1517 writel(FM10K_ITR_MASK_SET, q_vector->itr); 1518 1519 free_irq(entry->vector, q_vector); 1520 } 1521 } 1522 1523 /** 1524 * fm10k_qv_request_irq - initialize interrupts for queue vectors 1525 * @interface: board private structure 1526 * 1527 * Attempts to configure interrupts using the best available 1528 * capabilities of the hardware and kernel. 
1529 **/ 1530 int fm10k_qv_request_irq(struct fm10k_intfc *interface) 1531 { 1532 struct net_device *dev = interface->netdev; 1533 struct fm10k_hw *hw = &interface->hw; 1534 struct msix_entry *entry; 1535 int ri = 0, ti = 0; 1536 int vector, err; 1537 1538 entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; 1539 1540 for (vector = 0; vector < interface->num_q_vectors; vector++) { 1541 struct fm10k_q_vector *q_vector = interface->q_vector[vector]; 1542 1543 /* name the vector */ 1544 if (q_vector->tx.count && q_vector->rx.count) { 1545 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1546 "%s-TxRx-%d", dev->name, ri++); 1547 ti++; 1548 } else if (q_vector->rx.count) { 1549 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1550 "%s-rx-%d", dev->name, ri++); 1551 } else if (q_vector->tx.count) { 1552 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 1553 "%s-tx-%d", dev->name, ti++); 1554 } else { 1555 /* skip this unused q_vector */ 1556 continue; 1557 } 1558 1559 /* Assign ITR register to q_vector */ 1560 q_vector->itr = (hw->mac.type == fm10k_mac_pf) ? 
1561 &interface->uc_addr[FM10K_ITR(entry->entry)] : 1562 &interface->uc_addr[FM10K_VFITR(entry->entry)]; 1563 1564 /* request the IRQ */ 1565 err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0, 1566 q_vector->name, q_vector); 1567 if (err) { 1568 netif_err(interface, probe, dev, 1569 "request_irq failed for MSIX interrupt Error: %d\n", 1570 err); 1571 goto err_out; 1572 } 1573 1574 /* assign the mask for this irq */ 1575 irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); 1576 1577 /* Enable q_vector */ 1578 writel(FM10K_ITR_ENABLE, q_vector->itr); 1579 1580 entry++; 1581 } 1582 1583 return 0; 1584 1585 err_out: 1586 /* wind through the ring freeing all entries and vectors */ 1587 while (vector) { 1588 struct fm10k_q_vector *q_vector; 1589 1590 entry--; 1591 vector--; 1592 q_vector = interface->q_vector[vector]; 1593 1594 if (!q_vector->tx.count && !q_vector->rx.count) 1595 continue; 1596 1597 /* clear the affinity_mask in the IRQ descriptor */ 1598 irq_set_affinity_hint(entry->vector, NULL); 1599 1600 /* disable interrupts */ 1601 writel(FM10K_ITR_MASK_SET, q_vector->itr); 1602 1603 free_irq(entry->vector, q_vector); 1604 } 1605 1606 return err; 1607 } 1608 1609 void fm10k_up(struct fm10k_intfc *interface) 1610 { 1611 struct fm10k_hw *hw = &interface->hw; 1612 1613 /* Enable Tx/Rx DMA */ 1614 hw->mac.ops.start_hw(hw); 1615 1616 /* configure Tx descriptor rings */ 1617 fm10k_configure_tx(interface); 1618 1619 /* configure Rx descriptor rings */ 1620 fm10k_configure_rx(interface); 1621 1622 /* configure interrupts */ 1623 hw->mac.ops.update_int_moderator(hw); 1624 1625 /* clear down bit to indicate we are ready to go */ 1626 clear_bit(__FM10K_DOWN, &interface->state); 1627 1628 /* enable polling cleanups */ 1629 fm10k_napi_enable_all(interface); 1630 1631 /* re-establish Rx filters */ 1632 fm10k_restore_rx_state(interface); 1633 1634 /* enable transmits */ 1635 netif_tx_start_all_queues(interface->netdev); 1636 1637 /* kick off the service timer 
now */ 1638 hw->mac.get_host_state = true; 1639 mod_timer(&interface->service_timer, jiffies); 1640 } 1641 1642 static void fm10k_napi_disable_all(struct fm10k_intfc *interface) 1643 { 1644 struct fm10k_q_vector *q_vector; 1645 int q_idx; 1646 1647 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { 1648 q_vector = interface->q_vector[q_idx]; 1649 napi_disable(&q_vector->napi); 1650 } 1651 } 1652 1653 void fm10k_down(struct fm10k_intfc *interface) 1654 { 1655 struct net_device *netdev = interface->netdev; 1656 struct fm10k_hw *hw = &interface->hw; 1657 1658 /* signal that we are down to the interrupt handler and service task */ 1659 set_bit(__FM10K_DOWN, &interface->state); 1660 1661 /* call carrier off first to avoid false dev_watchdog timeouts */ 1662 netif_carrier_off(netdev); 1663 1664 /* disable transmits */ 1665 netif_tx_stop_all_queues(netdev); 1666 netif_tx_disable(netdev); 1667 1668 /* reset Rx filters */ 1669 fm10k_reset_rx_state(interface); 1670 1671 /* allow 10ms for device to quiesce */ 1672 usleep_range(10000, 20000); 1673 1674 /* disable polling routines */ 1675 fm10k_napi_disable_all(interface); 1676 1677 /* capture stats one last time before stopping interface */ 1678 fm10k_update_stats(interface); 1679 1680 /* Disable DMA engine for Tx/Rx */ 1681 hw->mac.ops.stop_hw(hw); 1682 1683 /* free any buffers still on the rings */ 1684 fm10k_clean_all_tx_rings(interface); 1685 fm10k_clean_all_rx_rings(interface); 1686 } 1687 1688 /** 1689 * fm10k_sw_init - Initialize general software structures 1690 * @interface: host interface private structure to initialize 1691 * 1692 * fm10k_sw_init initializes the interface private data structure. 1693 * Fields are initialized based on PCI device information and 1694 * OS network device settings (MTU size). 
1695 **/ 1696 static int fm10k_sw_init(struct fm10k_intfc *interface, 1697 const struct pci_device_id *ent) 1698 { 1699 const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; 1700 struct fm10k_hw *hw = &interface->hw; 1701 struct pci_dev *pdev = interface->pdev; 1702 struct net_device *netdev = interface->netdev; 1703 u32 rss_key[FM10K_RSSRK_SIZE]; 1704 unsigned int rss; 1705 int err; 1706 1707 /* initialize back pointer */ 1708 hw->back = interface; 1709 hw->hw_addr = interface->uc_addr; 1710 1711 /* PCI config space info */ 1712 hw->vendor_id = pdev->vendor; 1713 hw->device_id = pdev->device; 1714 hw->revision_id = pdev->revision; 1715 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1716 hw->subsystem_device_id = pdev->subsystem_device; 1717 1718 /* Setup hw api */ 1719 memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops)); 1720 hw->mac.type = fi->mac; 1721 1722 /* Setup IOV handlers */ 1723 if (fi->iov_ops) 1724 memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops)); 1725 1726 /* Set common capability flags and settings */ 1727 rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus()); 1728 interface->ring_feature[RING_F_RSS].limit = rss; 1729 fi->get_invariants(hw); 1730 1731 /* pick up the PCIe bus settings for reporting later */ 1732 if (hw->mac.ops.get_bus_info) 1733 hw->mac.ops.get_bus_info(hw); 1734 1735 /* limit the usable DMA range */ 1736 if (hw->mac.ops.set_dma_mask) 1737 hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev)); 1738 1739 /* update netdev with DMA restrictions */ 1740 if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) { 1741 netdev->features |= NETIF_F_HIGHDMA; 1742 netdev->vlan_features |= NETIF_F_HIGHDMA; 1743 } 1744 1745 /* delay any future reset requests */ 1746 interface->last_reset = jiffies + (10 * HZ); 1747 1748 /* reset and initialize the hardware so it is in a known state */ 1749 err = hw->mac.ops.reset_hw(hw); 1750 if (err) { 1751 dev_err(&pdev->dev, "reset_hw failed: %d\n", err); 1752 return err; 1753 } 1754 1755 
err = hw->mac.ops.init_hw(hw); 1756 if (err) { 1757 dev_err(&pdev->dev, "init_hw failed: %d\n", err); 1758 return err; 1759 } 1760 1761 /* initialize hardware statistics */ 1762 hw->mac.ops.update_hw_stats(hw, &interface->stats); 1763 1764 /* Set upper limit on IOV VFs that can be allocated */ 1765 pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs); 1766 1767 /* Start with random Ethernet address */ 1768 eth_random_addr(hw->mac.addr); 1769 1770 /* Initialize MAC address from hardware */ 1771 err = hw->mac.ops.read_mac_addr(hw); 1772 if (err) { 1773 dev_warn(&pdev->dev, 1774 "Failed to obtain MAC address defaulting to random\n"); 1775 /* tag address assignment as random */ 1776 netdev->addr_assign_type |= NET_ADDR_RANDOM; 1777 } 1778 1779 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 1780 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); 1781 1782 if (!is_valid_ether_addr(netdev->perm_addr)) { 1783 dev_err(&pdev->dev, "Invalid MAC Address\n"); 1784 return -EIO; 1785 } 1786 1787 /* assign BAR 4 resources for use with PTP */ 1788 if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED) 1789 interface->sw_addr = ioremap(pci_resource_start(pdev, 4), 1790 pci_resource_len(pdev, 4)); 1791 hw->sw_addr = interface->sw_addr; 1792 1793 /* initialize DCBNL interface */ 1794 fm10k_dcbnl_set_ops(netdev); 1795 1796 /* Initialize service timer and service task */ 1797 set_bit(__FM10K_SERVICE_DISABLE, &interface->state); 1798 setup_timer(&interface->service_timer, &fm10k_service_timer, 1799 (unsigned long)interface); 1800 INIT_WORK(&interface->service_task, fm10k_service_task); 1801 1802 /* kick off service timer now, even when interface is down */ 1803 mod_timer(&interface->service_timer, (HZ * 2) + jiffies); 1804 1805 /* Intitialize timestamp data */ 1806 fm10k_ts_init(interface); 1807 1808 /* set default ring sizes */ 1809 interface->tx_ring_count = FM10K_DEFAULT_TXD; 1810 interface->rx_ring_count = FM10K_DEFAULT_RXD; 1811 1812 /* set default interrupt 
moderation */ 1813 interface->tx_itr = FM10K_TX_ITR_DEFAULT; 1814 interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; 1815 1816 /* initialize vxlan_port list */ 1817 INIT_LIST_HEAD(&interface->vxlan_port); 1818 1819 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 1820 memcpy(interface->rssrk, rss_key, sizeof(rss_key)); 1821 1822 /* Start off interface as being down */ 1823 set_bit(__FM10K_DOWN, &interface->state); 1824 1825 return 0; 1826 } 1827 1828 static void fm10k_slot_warn(struct fm10k_intfc *interface) 1829 { 1830 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 1831 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 1832 struct fm10k_hw *hw = &interface->hw; 1833 int max_gts = 0, expected_gts = 0; 1834 1835 if (pcie_get_minimum_link(interface->pdev, &speed, &width) || 1836 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { 1837 dev_warn(&interface->pdev->dev, 1838 "Unable to determine PCI Express bandwidth.\n"); 1839 return; 1840 } 1841 1842 switch (speed) { 1843 case PCIE_SPEED_2_5GT: 1844 /* 8b/10b encoding reduces max throughput by 20% */ 1845 max_gts = 2 * width; 1846 break; 1847 case PCIE_SPEED_5_0GT: 1848 /* 8b/10b encoding reduces max throughput by 20% */ 1849 max_gts = 4 * width; 1850 break; 1851 case PCIE_SPEED_8_0GT: 1852 /* 128b/130b encoding has less than 2% impact on throughput */ 1853 max_gts = 8 * width; 1854 break; 1855 default: 1856 dev_warn(&interface->pdev->dev, 1857 "Unable to determine PCI Express bandwidth.\n"); 1858 return; 1859 } 1860 1861 dev_info(&interface->pdev->dev, 1862 "PCI Express bandwidth of %dGT/s available\n", 1863 max_gts); 1864 dev_info(&interface->pdev->dev, 1865 "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n", 1866 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : 1867 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : 1868 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : 1869 "Unknown"), 1870 hw->bus.width, 1871 (speed == PCIE_SPEED_2_5GT ? "20%" : 1872 speed == PCIE_SPEED_5_0GT ? 
"20%" : 1873 speed == PCIE_SPEED_8_0GT ? "<2%" : 1874 "Unknown"), 1875 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : 1876 hw->bus.payload == fm10k_bus_payload_256 ? "256B" : 1877 hw->bus.payload == fm10k_bus_payload_512 ? "512B" : 1878 "Unknown")); 1879 1880 switch (hw->bus_caps.speed) { 1881 case fm10k_bus_speed_2500: 1882 /* 8b/10b encoding reduces max throughput by 20% */ 1883 expected_gts = 2 * hw->bus_caps.width; 1884 break; 1885 case fm10k_bus_speed_5000: 1886 /* 8b/10b encoding reduces max throughput by 20% */ 1887 expected_gts = 4 * hw->bus_caps.width; 1888 break; 1889 case fm10k_bus_speed_8000: 1890 /* 128b/130b encoding has less than 2% impact on throughput */ 1891 expected_gts = 8 * hw->bus_caps.width; 1892 break; 1893 default: 1894 dev_warn(&interface->pdev->dev, 1895 "Unable to determine expected PCI Express bandwidth.\n"); 1896 return; 1897 } 1898 1899 if (max_gts >= expected_gts) 1900 return; 1901 1902 dev_warn(&interface->pdev->dev, 1903 "This device requires %dGT/s of bandwidth for optimal performance.\n", 1904 expected_gts); 1905 dev_warn(&interface->pdev->dev, 1906 "A %sslot with x%d lanes is suggested.\n", 1907 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " : 1908 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " : 1909 hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""), 1910 hw->bus_caps.width); 1911 } 1912 1913 /** 1914 * fm10k_probe - Device Initialization Routine 1915 * @pdev: PCI device information struct 1916 * @ent: entry in fm10k_pci_tbl 1917 * 1918 * Returns 0 on success, negative on failure 1919 * 1920 * fm10k_probe initializes an interface identified by a pci_dev structure. 1921 * The OS initialization, configuring of the interface private structure, 1922 * and a hardware reset occur. 
1923 **/ 1924 static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1925 { 1926 struct net_device *netdev; 1927 struct fm10k_intfc *interface; 1928 int err; 1929 1930 err = pci_enable_device_mem(pdev); 1931 if (err) 1932 return err; 1933 1934 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); 1935 if (err) 1936 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1937 if (err) { 1938 dev_err(&pdev->dev, 1939 "DMA configuration failed: %d\n", err); 1940 goto err_dma; 1941 } 1942 1943 err = pci_request_selected_regions(pdev, 1944 pci_select_bars(pdev, 1945 IORESOURCE_MEM), 1946 fm10k_driver_name); 1947 if (err) { 1948 dev_err(&pdev->dev, 1949 "pci_request_selected_regions failed: %d\n", err); 1950 goto err_pci_reg; 1951 } 1952 1953 pci_enable_pcie_error_reporting(pdev); 1954 1955 pci_set_master(pdev); 1956 pci_save_state(pdev); 1957 1958 netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]); 1959 if (!netdev) { 1960 err = -ENOMEM; 1961 goto err_alloc_netdev; 1962 } 1963 1964 SET_NETDEV_DEV(netdev, &pdev->dev); 1965 1966 interface = netdev_priv(netdev); 1967 pci_set_drvdata(pdev, interface); 1968 1969 interface->netdev = netdev; 1970 interface->pdev = pdev; 1971 1972 interface->uc_addr = ioremap(pci_resource_start(pdev, 0), 1973 FM10K_UC_ADDR_SIZE); 1974 if (!interface->uc_addr) { 1975 err = -EIO; 1976 goto err_ioremap; 1977 } 1978 1979 err = fm10k_sw_init(interface, ent); 1980 if (err) 1981 goto err_sw_init; 1982 1983 /* enable debugfs support */ 1984 fm10k_dbg_intfc_init(interface); 1985 1986 err = fm10k_init_queueing_scheme(interface); 1987 if (err) 1988 goto err_sw_init; 1989 1990 err = fm10k_mbx_request_irq(interface); 1991 if (err) 1992 goto err_mbx_interrupt; 1993 1994 /* final check of hardware state before registering the interface */ 1995 err = fm10k_hw_ready(interface); 1996 if (err) 1997 goto err_register; 1998 1999 err = register_netdev(netdev); 2000 if (err) 2001 goto err_register; 2002 2003 /* 
carrier off reporting is important to ethtool even BEFORE open */ 2004 netif_carrier_off(netdev); 2005 2006 /* stop all the transmit queues from transmitting until link is up */ 2007 netif_tx_stop_all_queues(netdev); 2008 2009 /* Register PTP interface */ 2010 fm10k_ptp_register(interface); 2011 2012 /* print warning for non-optimal configurations */ 2013 fm10k_slot_warn(interface); 2014 2015 /* report MAC address for logging */ 2016 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); 2017 2018 /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ 2019 fm10k_iov_configure(pdev, 0); 2020 2021 /* clear the service task disable bit to allow service task to start */ 2022 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); 2023 2024 return 0; 2025 2026 err_register: 2027 fm10k_mbx_free_irq(interface); 2028 err_mbx_interrupt: 2029 fm10k_clear_queueing_scheme(interface); 2030 err_sw_init: 2031 if (interface->sw_addr) 2032 iounmap(interface->sw_addr); 2033 iounmap(interface->uc_addr); 2034 err_ioremap: 2035 free_netdev(netdev); 2036 err_alloc_netdev: 2037 pci_release_selected_regions(pdev, 2038 pci_select_bars(pdev, IORESOURCE_MEM)); 2039 err_pci_reg: 2040 err_dma: 2041 pci_disable_device(pdev); 2042 return err; 2043 } 2044 2045 /** 2046 * fm10k_remove - Device Removal Routine 2047 * @pdev: PCI device information struct 2048 * 2049 * fm10k_remove is called by the PCI subsystem to alert the driver 2050 * that it should release a PCI device. The could be caused by a 2051 * Hot-Plug event, or because the driver is going to be removed from 2052 * memory. 
2053 **/ 2054 static void fm10k_remove(struct pci_dev *pdev) 2055 { 2056 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2057 struct net_device *netdev = interface->netdev; 2058 2059 del_timer_sync(&interface->service_timer); 2060 2061 set_bit(__FM10K_SERVICE_DISABLE, &interface->state); 2062 cancel_work_sync(&interface->service_task); 2063 2064 /* free netdev, this may bounce the interrupts due to setup_tc */ 2065 if (netdev->reg_state == NETREG_REGISTERED) 2066 unregister_netdev(netdev); 2067 2068 /* cleanup timestamp handling */ 2069 fm10k_ptp_unregister(interface); 2070 2071 /* release VFs */ 2072 fm10k_iov_disable(pdev); 2073 2074 /* disable mailbox interrupt */ 2075 fm10k_mbx_free_irq(interface); 2076 2077 /* free interrupts */ 2078 fm10k_clear_queueing_scheme(interface); 2079 2080 /* remove any debugfs interfaces */ 2081 fm10k_dbg_intfc_exit(interface); 2082 2083 if (interface->sw_addr) 2084 iounmap(interface->sw_addr); 2085 iounmap(interface->uc_addr); 2086 2087 free_netdev(netdev); 2088 2089 pci_release_selected_regions(pdev, 2090 pci_select_bars(pdev, IORESOURCE_MEM)); 2091 2092 pci_disable_pcie_error_reporting(pdev); 2093 2094 pci_disable_device(pdev); 2095 } 2096 2097 #ifdef CONFIG_PM 2098 /** 2099 * fm10k_resume - Restore device to pre-sleep state 2100 * @pdev: PCI device information struct 2101 * 2102 * fm10k_resume is called after the system has powered back up from a sleep 2103 * state and is ready to resume operation. This function is meant to restore 2104 * the device back to its pre-sleep state. 2105 **/ 2106 static int fm10k_resume(struct pci_dev *pdev) 2107 { 2108 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2109 struct net_device *netdev = interface->netdev; 2110 struct fm10k_hw *hw = &interface->hw; 2111 u32 err; 2112 2113 pci_set_power_state(pdev, PCI_D0); 2114 pci_restore_state(pdev); 2115 2116 /* pci_restore_state clears dev->state_saved so call 2117 * pci_save_state to restore it. 
2118 */ 2119 pci_save_state(pdev); 2120 2121 err = pci_enable_device_mem(pdev); 2122 if (err) { 2123 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 2124 return err; 2125 } 2126 pci_set_master(pdev); 2127 2128 pci_wake_from_d3(pdev, false); 2129 2130 /* refresh hw_addr in case it was dropped */ 2131 hw->hw_addr = interface->uc_addr; 2132 2133 /* reset hardware to known state */ 2134 err = hw->mac.ops.init_hw(&interface->hw); 2135 if (err) { 2136 dev_err(&pdev->dev, "init_hw failed: %d\n", err); 2137 return err; 2138 } 2139 2140 /* reset statistics starting values */ 2141 hw->mac.ops.rebind_hw_stats(hw, &interface->stats); 2142 2143 /* reset clock */ 2144 fm10k_ts_reset(interface); 2145 2146 rtnl_lock(); 2147 2148 err = fm10k_init_queueing_scheme(interface); 2149 if (err) 2150 goto err_queueing_scheme; 2151 2152 err = fm10k_mbx_request_irq(interface); 2153 if (err) 2154 goto err_mbx_irq; 2155 2156 err = fm10k_hw_ready(interface); 2157 if (err) 2158 goto err_open; 2159 2160 err = netif_running(netdev) ? 
fm10k_open(netdev) : 0; 2161 if (err) 2162 goto err_open; 2163 2164 rtnl_unlock(); 2165 2166 /* assume host is not ready, to prevent race with watchdog in case we 2167 * actually don't have connection to the switch 2168 */ 2169 interface->host_ready = false; 2170 fm10k_watchdog_host_not_ready(interface); 2171 2172 /* clear the service task disable bit to allow service task to start */ 2173 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); 2174 fm10k_service_event_schedule(interface); 2175 2176 /* restore SR-IOV interface */ 2177 fm10k_iov_resume(pdev); 2178 2179 netif_device_attach(netdev); 2180 2181 return 0; 2182 err_open: 2183 fm10k_mbx_free_irq(interface); 2184 err_mbx_irq: 2185 fm10k_clear_queueing_scheme(interface); 2186 err_queueing_scheme: 2187 rtnl_unlock(); 2188 2189 return err; 2190 } 2191 2192 /** 2193 * fm10k_suspend - Prepare the device for a system sleep state 2194 * @pdev: PCI device information struct 2195 * 2196 * fm10k_suspend is meant to shutdown the device prior to the system entering 2197 * a sleep state. The fm10k hardware does not support wake on lan so the 2198 * driver simply needs to shut down the device so it is in a low power state. 2199 **/ 2200 static int fm10k_suspend(struct pci_dev *pdev, 2201 pm_message_t __always_unused state) 2202 { 2203 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2204 struct net_device *netdev = interface->netdev; 2205 int err = 0; 2206 2207 netif_device_detach(netdev); 2208 2209 fm10k_iov_suspend(pdev); 2210 2211 /* the watchdog tasks may read registers, which will appear like a 2212 * surprise-remove event once the PCI device is disabled. This will 2213 * cause us to close the netdevice, so we don't retain the open/closed 2214 * state post-resume. Prevent this by disabling the service task while 2215 * suspended, until we actually resume. 
2216 */ 2217 set_bit(__FM10K_SERVICE_DISABLE, &interface->state); 2218 cancel_work_sync(&interface->service_task); 2219 2220 rtnl_lock(); 2221 2222 if (netif_running(netdev)) 2223 fm10k_close(netdev); 2224 2225 fm10k_mbx_free_irq(interface); 2226 2227 fm10k_clear_queueing_scheme(interface); 2228 2229 rtnl_unlock(); 2230 2231 err = pci_save_state(pdev); 2232 if (err) 2233 return err; 2234 2235 pci_disable_device(pdev); 2236 pci_wake_from_d3(pdev, false); 2237 pci_set_power_state(pdev, PCI_D3hot); 2238 2239 return 0; 2240 } 2241 2242 #endif /* CONFIG_PM */ 2243 /** 2244 * fm10k_io_error_detected - called when PCI error is detected 2245 * @pdev: Pointer to PCI device 2246 * @state: The current pci connection state 2247 * 2248 * This function is called after a PCI bus error affecting 2249 * this device has been detected. 2250 */ 2251 static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, 2252 pci_channel_state_t state) 2253 { 2254 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2255 struct net_device *netdev = interface->netdev; 2256 2257 netif_device_detach(netdev); 2258 2259 if (state == pci_channel_io_perm_failure) 2260 return PCI_ERS_RESULT_DISCONNECT; 2261 2262 if (netif_running(netdev)) 2263 fm10k_close(netdev); 2264 2265 /* free interrupts */ 2266 fm10k_clear_queueing_scheme(interface); 2267 2268 fm10k_mbx_free_irq(interface); 2269 2270 pci_disable_device(pdev); 2271 2272 /* Request a slot reset. */ 2273 return PCI_ERS_RESULT_NEED_RESET; 2274 } 2275 2276 /** 2277 * fm10k_io_slot_reset - called after the pci bus has been reset. 2278 * @pdev: Pointer to PCI device 2279 * 2280 * Restart the card from scratch, as if from a cold-boot. 
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After second error pci->state_saved is false, this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		/* refresh hw_addr in case it was dropped */
		interface->hw.hw_addr = interface->uc_addr;

		/* defer the actual hardware re-init to the service task
		 * by flagging a reset request and kicking the worker
		 */
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	/* clear any stale uncorrectable AER status regardless of outcome */
	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}

/**
 * fm10k_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
2322 */ 2323 static void fm10k_io_resume(struct pci_dev *pdev) 2324 { 2325 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2326 struct net_device *netdev = interface->netdev; 2327 struct fm10k_hw *hw = &interface->hw; 2328 int err = 0; 2329 2330 /* reset hardware to known state */ 2331 err = hw->mac.ops.init_hw(&interface->hw); 2332 if (err) { 2333 dev_err(&pdev->dev, "init_hw failed: %d\n", err); 2334 return; 2335 } 2336 2337 /* reset statistics starting values */ 2338 hw->mac.ops.rebind_hw_stats(hw, &interface->stats); 2339 2340 err = fm10k_init_queueing_scheme(interface); 2341 if (err) { 2342 dev_err(&interface->pdev->dev, 2343 "init_queueing_scheme failed: %d\n", err); 2344 return; 2345 } 2346 2347 /* reassociate interrupts */ 2348 fm10k_mbx_request_irq(interface); 2349 2350 /* reset clock */ 2351 fm10k_ts_reset(interface); 2352 2353 if (netif_running(netdev)) 2354 err = fm10k_open(netdev); 2355 2356 /* final check of hardware state before registering the interface */ 2357 err = err ? : fm10k_hw_ready(interface); 2358 2359 if (!err) 2360 netif_device_attach(netdev); 2361 } 2362 2363 static const struct pci_error_handlers fm10k_err_handler = { 2364 .error_detected = fm10k_io_error_detected, 2365 .slot_reset = fm10k_io_slot_reset, 2366 .resume = fm10k_io_resume, 2367 }; 2368 2369 static struct pci_driver fm10k_driver = { 2370 .name = fm10k_driver_name, 2371 .id_table = fm10k_pci_tbl, 2372 .probe = fm10k_probe, 2373 .remove = fm10k_remove, 2374 #ifdef CONFIG_PM 2375 .suspend = fm10k_suspend, 2376 .resume = fm10k_resume, 2377 #endif 2378 .sriov_configure = fm10k_iov_configure, 2379 .err_handler = &fm10k_err_handler 2380 }; 2381 2382 /** 2383 * fm10k_register_pci_driver - register driver interface 2384 * 2385 * This funciton is called on module load in order to register the driver. 
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}

/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}