1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static void iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 static const char iavf_copyright[] = 25 "Copyright (c) 2013 - 2018 Intel Corporation."; 26 27 /* iavf_pci_tbl - PCI Device ID Table 28 * 29 * Wildcard entries (PCI_ANY_ID) should come last 30 * Last entry must be all 0s 31 * 32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 33 * Class, Class Mask, private data (not used) } 34 */ 35 static const struct pci_device_id iavf_pci_tbl[] = { 36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 40 /* required last entry */ 41 {0, } 42 }; 43 44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 45 46 MODULE_ALIAS("i40evf"); 47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 49 MODULE_LICENSE("GPL v2"); 50 51 static const struct net_device_ops iavf_netdev_ops; 52 struct workqueue_struct *iavf_wq; 53 54 /** 55 * iavf_pdev_to_adapter - go from pci_dev to adapter 56 * @pdev: pci_dev pointer 57 */ 58 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) 59 { 60 return netdev_priv(pci_get_drvdata(pdev)); 61 } 62 63 /** 64 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 65 * @hw: pointer to the HW structure 66 * @mem: ptr to mem struct to fill out 67 * @size: size of memory requested 68 * @alignment: what to align the allocation to 69 **/ 70 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 71 struct iavf_dma_mem *mem, 72 u64 size, u32 alignment) 73 { 74 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 75 76 if (!mem) 77 return IAVF_ERR_PARAM; 78 79 mem->size = ALIGN(size, alignment); 80 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 81 (dma_addr_t *)&mem->pa, GFP_KERNEL); 82 if (mem->va) 83 return 0; 84 else 85 return IAVF_ERR_NO_MEMORY; 86 } 87 88 /** 89 * iavf_free_dma_mem_d - OS specific memory free for shared code 90 * @hw: pointer to the HW structure 91 * @mem: ptr to mem struct to free 92 **/ 93 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 94 struct iavf_dma_mem *mem) 95 { 96 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 97 98 if (!mem || !mem->va) 99 return IAVF_ERR_PARAM; 100 dma_free_coherent(&adapter->pdev->dev, mem->size, 101 mem->va, (dma_addr_t)mem->pa); 102 return 0; 103 } 104 105 /** 106 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 107 * @hw: pointer to the HW structure 108 * @mem: ptr to mem struct to fill out 109 * @size: size of memory requested 110 **/ 111 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw 
*hw, 112 struct iavf_virt_mem *mem, u32 size) 113 { 114 if (!mem) 115 return IAVF_ERR_PARAM; 116 117 mem->size = size; 118 mem->va = kzalloc(size, GFP_KERNEL); 119 120 if (mem->va) 121 return 0; 122 else 123 return IAVF_ERR_NO_MEMORY; 124 } 125 126 /** 127 * iavf_free_virt_mem_d - OS specific memory free for shared code 128 * @hw: pointer to the HW structure 129 * @mem: ptr to mem struct to free 130 **/ 131 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 132 struct iavf_virt_mem *mem) 133 { 134 if (!mem) 135 return IAVF_ERR_PARAM; 136 137 /* it's ok to kfree a NULL pointer */ 138 kfree(mem->va); 139 140 return 0; 141 } 142 143 /** 144 * iavf_lock_timeout - try to lock mutex but give up after timeout 145 * @lock: mutex that should be locked 146 * @msecs: timeout in msecs 147 * 148 * Returns 0 on success, negative on failure 149 **/ 150 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) 151 { 152 unsigned int wait, delay = 10; 153 154 for (wait = 0; wait < msecs; wait += delay) { 155 if (mutex_trylock(lock)) 156 return 0; 157 158 msleep(delay); 159 } 160 161 return -1; 162 } 163 164 /** 165 * iavf_schedule_reset - Set the flags and schedule a reset event 166 * @adapter: board private structure 167 **/ 168 void iavf_schedule_reset(struct iavf_adapter *adapter) 169 { 170 if (!(adapter->flags & 171 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 172 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 173 queue_work(iavf_wq, &adapter->reset_task); 174 } 175 } 176 177 /** 178 * iavf_schedule_request_stats - Set the flags and schedule statistics request 179 * @adapter: board private structure 180 * 181 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly 182 * request and refresh ethtool stats 183 **/ 184 void iavf_schedule_request_stats(struct iavf_adapter *adapter) 185 { 186 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; 187 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 188 } 189 190 /** 191 * iavf_tx_timeout - Respond to a Tx Hang 192 * @netdev: network interface device structure 193 * @txqueue: queue number that is timing out 194 **/ 195 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 196 { 197 struct iavf_adapter *adapter = netdev_priv(netdev); 198 199 adapter->tx_timeout_count++; 200 iavf_schedule_reset(adapter); 201 } 202 203 /** 204 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 205 * @adapter: board private structure 206 **/ 207 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 208 { 209 struct iavf_hw *hw = &adapter->hw; 210 211 if (!adapter->msix_entries) 212 return; 213 214 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 215 216 iavf_flush(hw); 217 218 synchronize_irq(adapter->msix_entries[0].vector); 219 } 220 221 /** 222 * iavf_misc_irq_enable - Enable default interrupt generation settings 223 * @adapter: board private structure 224 **/ 225 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 226 { 227 struct iavf_hw *hw = &adapter->hw; 228 229 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 230 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 231 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 232 233 iavf_flush(hw); 234 } 235 236 /** 237 * iavf_irq_disable - Mask off interrupt generation on the NIC 238 * @adapter: board private structure 239 **/ 240 static void iavf_irq_disable(struct iavf_adapter *adapter) 241 { 242 int i; 243 struct iavf_hw *hw = &adapter->hw; 244 245 if (!adapter->msix_entries) 246 return; 247 248 for (i = 1; i < 
adapter->num_msix_vectors; i++) { 249 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 250 synchronize_irq(adapter->msix_entries[i].vector); 251 } 252 iavf_flush(hw); 253 } 254 255 /** 256 * iavf_irq_enable_queues - Enable interrupt for specified queues 257 * @adapter: board private structure 258 * @mask: bitmap of queues to enable 259 **/ 260 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 261 { 262 struct iavf_hw *hw = &adapter->hw; 263 int i; 264 265 for (i = 1; i < adapter->num_msix_vectors; i++) { 266 if (mask & BIT(i - 1)) { 267 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 268 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 269 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 270 } 271 } 272 } 273 274 /** 275 * iavf_irq_enable - Enable default interrupt generation settings 276 * @adapter: board private structure 277 * @flush: boolean value whether to run rd32() 278 **/ 279 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 280 { 281 struct iavf_hw *hw = &adapter->hw; 282 283 iavf_misc_irq_enable(adapter); 284 iavf_irq_enable_queues(adapter, ~0); 285 286 if (flush) 287 iavf_flush(hw); 288 } 289 290 /** 291 * iavf_msix_aq - Interrupt handler for vector 0 292 * @irq: interrupt number 293 * @data: pointer to netdev 294 **/ 295 static irqreturn_t iavf_msix_aq(int irq, void *data) 296 { 297 struct net_device *netdev = data; 298 struct iavf_adapter *adapter = netdev_priv(netdev); 299 struct iavf_hw *hw = &adapter->hw; 300 301 /* handle non-queue interrupts, these reads clear the registers */ 302 rd32(hw, IAVF_VFINT_ICR01); 303 rd32(hw, IAVF_VFINT_ICR0_ENA1); 304 305 /* schedule work on the private workqueue */ 306 queue_work(iavf_wq, &adapter->adminq_task); 307 308 return IRQ_HANDLED; 309 } 310 311 /** 312 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 313 * @irq: interrupt number 314 * @data: pointer to a q_vector 315 **/ 316 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 317 { 318 struct iavf_q_vector *q_vector = data; 319 320 if (!q_vector->tx.ring && !q_vector->rx.ring) 321 return IRQ_HANDLED; 322 323 napi_schedule_irqoff(&q_vector->napi); 324 325 return IRQ_HANDLED; 326 } 327 328 /** 329 * iavf_map_vector_to_rxq - associate irqs with rx queues 330 * @adapter: board private structure 331 * @v_idx: interrupt number 332 * @r_idx: queue number 333 **/ 334 static void 335 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 336 { 337 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 338 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 339 struct iavf_hw *hw = &adapter->hw; 340 341 rx_ring->q_vector = q_vector; 342 rx_ring->next = q_vector->rx.ring; 343 rx_ring->vsi = &adapter->vsi; 344 q_vector->rx.ring = rx_ring; 345 q_vector->rx.count++; 346 q_vector->rx.next_update = jiffies + 1; 347 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 348 q_vector->ring_mask |= BIT(r_idx); 349 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 350 q_vector->rx.current_itr >> 1); 351 q_vector->rx.current_itr = q_vector->rx.target_itr; 352 } 353 354 /** 355 * iavf_map_vector_to_txq - associate irqs with tx queues 356 * @adapter: board private structure 357 * @v_idx: interrupt number 358 * @t_idx: queue number 359 **/ 360 static void 361 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 362 { 363 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 364 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 365 struct iavf_hw *hw = &adapter->hw; 366 367 tx_ring->q_vector = q_vector; 368 tx_ring->next = 
q_vector->tx.ring; 369 tx_ring->vsi = &adapter->vsi; 370 q_vector->tx.ring = tx_ring; 371 q_vector->tx.count++; 372 q_vector->tx.next_update = jiffies + 1; 373 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 374 q_vector->num_ringpairs++; 375 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 376 q_vector->tx.target_itr >> 1); 377 q_vector->tx.current_itr = q_vector->tx.target_itr; 378 } 379 380 /** 381 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 382 * @adapter: board private structure to initialize 383 * 384 * This function maps descriptor rings to the queue-specific vectors 385 * we were allotted through the MSI-X enabling code. Ideally, we'd have 386 * one vector per ring/queue, but on a constrained vector budget, we 387 * group the rings as "efficiently" as possible. You would add new 388 * mapping configurations in here. 389 **/ 390 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 391 { 392 int rings_remaining = adapter->num_active_queues; 393 int ridx = 0, vidx = 0; 394 int q_vectors; 395 396 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 397 398 for (; ridx < rings_remaining; ridx++) { 399 iavf_map_vector_to_rxq(adapter, vidx, ridx); 400 iavf_map_vector_to_txq(adapter, vidx, ridx); 401 402 /* In the case where we have more queues than vectors, continue 403 * round-robin on vectors until all queues are mapped. 404 */ 405 if (++vidx >= q_vectors) 406 vidx = 0; 407 } 408 409 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 410 } 411 412 /** 413 * iavf_irq_affinity_notify - Callback for affinity changes 414 * @notify: context as to what irq was changed 415 * @mask: the new affinity mask 416 * 417 * This is a callback function used by the irq_set_affinity_notifier function 418 * so that we may register to receive changes to the irq affinity masks. 419 **/ 420 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 421 const cpumask_t *mask) 422 { 423 struct iavf_q_vector *q_vector = 424 container_of(notify, struct iavf_q_vector, affinity_notify); 425 426 cpumask_copy(&q_vector->affinity_mask, mask); 427 } 428 429 /** 430 * iavf_irq_affinity_release - Callback for affinity notifier release 431 * @ref: internal core kernel usage 432 * 433 * This is a callback function used by the irq_set_affinity_notifier function 434 * to inform the current notification subscriber that they will no longer 435 * receive notifications. 436 **/ 437 static void iavf_irq_affinity_release(struct kref *ref) {} 438 439 /** 440 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 441 * @adapter: board private structure 442 * @basename: device basename 443 * 444 * Allocates MSI-X vectors for tx and rx handling, and requests 445 * interrupts from the kernel. 
446 **/ 447 static int 448 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 449 { 450 unsigned int vector, q_vectors; 451 unsigned int rx_int_idx = 0, tx_int_idx = 0; 452 int irq_num, err; 453 int cpu; 454 455 iavf_irq_disable(adapter); 456 /* Decrement for Other and TCP Timer vectors */ 457 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 458 459 for (vector = 0; vector < q_vectors; vector++) { 460 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 461 462 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 463 464 if (q_vector->tx.ring && q_vector->rx.ring) { 465 snprintf(q_vector->name, sizeof(q_vector->name), 466 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 467 tx_int_idx++; 468 } else if (q_vector->rx.ring) { 469 snprintf(q_vector->name, sizeof(q_vector->name), 470 "iavf-%s-rx-%d", basename, rx_int_idx++); 471 } else if (q_vector->tx.ring) { 472 snprintf(q_vector->name, sizeof(q_vector->name), 473 "iavf-%s-tx-%d", basename, tx_int_idx++); 474 } else { 475 /* skip this unused q_vector */ 476 continue; 477 } 478 err = request_irq(irq_num, 479 iavf_msix_clean_rings, 480 0, 481 q_vector->name, 482 q_vector); 483 if (err) { 484 dev_info(&adapter->pdev->dev, 485 "Request_irq failed, error: %d\n", err); 486 goto free_queue_irqs; 487 } 488 /* register for affinity change notifications */ 489 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 490 q_vector->affinity_notify.release = 491 iavf_irq_affinity_release; 492 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 493 /* Spread the IRQ affinity hints across online CPUs. Note that 494 * get_cpu_mask returns a mask with a permanent lifetime so 495 * it's safe to use as a hint for irq_set_affinity_hint. 496 */ 497 cpu = cpumask_local_spread(q_vector->v_idx, -1); 498 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 499 } 500 501 return 0; 502 503 free_queue_irqs: 504 while (vector) { 505 vector--; 506 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 507 irq_set_affinity_notifier(irq_num, NULL); 508 irq_set_affinity_hint(irq_num, NULL); 509 free_irq(irq_num, &adapter->q_vectors[vector]); 510 } 511 return err; 512 } 513 514 /** 515 * iavf_request_misc_irq - Initialize MSI-X interrupts 516 * @adapter: board private structure 517 * 518 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 519 * vector is only for the admin queue, and stays active even when the netdev 520 * is closed. 521 **/ 522 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 523 { 524 struct net_device *netdev = adapter->netdev; 525 int err; 526 527 snprintf(adapter->misc_vector_name, 528 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 529 dev_name(&adapter->pdev->dev)); 530 err = request_irq(adapter->msix_entries[0].vector, 531 &iavf_msix_aq, 0, 532 adapter->misc_vector_name, netdev); 533 if (err) { 534 dev_err(&adapter->pdev->dev, 535 "request_irq for %s failed: %d\n", 536 adapter->misc_vector_name, err); 537 free_irq(adapter->msix_entries[0].vector, netdev); 538 } 539 return err; 540 } 541 542 /** 543 * iavf_free_traffic_irqs - Free MSI-X interrupts 544 * @adapter: board private structure 545 * 546 * Frees all MSI-X vectors other than 0. 
547 **/ 548 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 549 { 550 int vector, irq_num, q_vectors; 551 552 if (!adapter->msix_entries) 553 return; 554 555 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 556 557 for (vector = 0; vector < q_vectors; vector++) { 558 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 559 irq_set_affinity_notifier(irq_num, NULL); 560 irq_set_affinity_hint(irq_num, NULL); 561 free_irq(irq_num, &adapter->q_vectors[vector]); 562 } 563 } 564 565 /** 566 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 567 * @adapter: board private structure 568 * 569 * Frees MSI-X vector 0. 570 **/ 571 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 572 { 573 struct net_device *netdev = adapter->netdev; 574 575 if (!adapter->msix_entries) 576 return; 577 578 free_irq(adapter->msix_entries[0].vector, netdev); 579 } 580 581 /** 582 * iavf_configure_tx - Configure Transmit Unit after Reset 583 * @adapter: board private structure 584 * 585 * Configure the Tx unit of the MAC after a reset. 586 **/ 587 static void iavf_configure_tx(struct iavf_adapter *adapter) 588 { 589 struct iavf_hw *hw = &adapter->hw; 590 int i; 591 592 for (i = 0; i < adapter->num_active_queues; i++) 593 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 594 } 595 596 /** 597 * iavf_configure_rx - Configure Receive Unit after Reset 598 * @adapter: board private structure 599 * 600 * Configure the Rx unit of the MAC after a reset. 601 **/ 602 static void iavf_configure_rx(struct iavf_adapter *adapter) 603 { 604 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 605 struct iavf_hw *hw = &adapter->hw; 606 int i; 607 608 /* Legacy Rx will always default to a 2048 buffer size. */ 609 #if (PAGE_SIZE < 8192) 610 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 611 struct net_device *netdev = adapter->netdev; 612 613 /* For jumbo frames on systems with 4K pages we have to use 614 * an order 1 page, so we might as well increase the size 615 * of our Rx buffer to make better use of the available space 616 */ 617 rx_buf_len = IAVF_RXBUFFER_3072; 618 619 /* We use a 1536 buffer size for configurations with 620 * standard Ethernet mtu. On x86 this gives us enough room 621 * for shared info and 192 bytes of padding. 622 */ 623 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 624 (netdev->mtu <= ETH_DATA_LEN)) 625 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 626 } 627 #endif 628 629 for (i = 0; i < adapter->num_active_queues; i++) { 630 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 631 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 632 633 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 634 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 635 else 636 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 637 } 638 } 639 640 /** 641 * iavf_find_vlan - Search filter list for specific vlan filter 642 * @adapter: board private structure 643 * @vlan: vlan tag 644 * 645 * Returns ptr to the filter object or NULL. Must be called while holding the 646 * mac_vlan_list_lock. 647 **/ 648 static struct 649 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 650 { 651 struct iavf_vlan_filter *f; 652 653 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 654 if (vlan == f->vlan) 655 return f; 656 } 657 return NULL; 658 } 659 660 /** 661 * iavf_add_vlan - Add a vlan filter to the list 662 * @adapter: board private structure 663 * @vlan: VLAN tag 664 * 665 * Returns ptr to the filter object or NULL when no memory available. 
666 **/ 667 static struct 668 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 669 { 670 struct iavf_vlan_filter *f = NULL; 671 672 spin_lock_bh(&adapter->mac_vlan_list_lock); 673 674 f = iavf_find_vlan(adapter, vlan); 675 if (!f) { 676 f = kzalloc(sizeof(*f), GFP_ATOMIC); 677 if (!f) 678 goto clearout; 679 680 f->vlan = vlan; 681 682 list_add_tail(&f->list, &adapter->vlan_filter_list); 683 f->add = true; 684 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 685 } 686 687 clearout: 688 spin_unlock_bh(&adapter->mac_vlan_list_lock); 689 return f; 690 } 691 692 /** 693 * iavf_del_vlan - Remove a vlan filter from the list 694 * @adapter: board private structure 695 * @vlan: VLAN tag 696 **/ 697 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 698 { 699 struct iavf_vlan_filter *f; 700 701 spin_lock_bh(&adapter->mac_vlan_list_lock); 702 703 f = iavf_find_vlan(adapter, vlan); 704 if (f) { 705 f->remove = true; 706 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 707 } 708 709 spin_unlock_bh(&adapter->mac_vlan_list_lock); 710 } 711 712 /** 713 * iavf_restore_filters 714 * @adapter: board private structure 715 * 716 * Restore existing non MAC filters when VF netdev comes back up 717 **/ 718 static void iavf_restore_filters(struct iavf_adapter *adapter) 719 { 720 u16 vid; 721 722 /* re-add all VLAN filters */ 723 for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) 724 iavf_add_vlan(adapter, vid); 725 } 726 727 /** 728 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 729 * @netdev: network device struct 730 * @proto: unused protocol data 731 * @vid: VLAN tag 732 **/ 733 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 734 __always_unused __be16 proto, u16 vid) 735 { 736 struct iavf_adapter *adapter = netdev_priv(netdev); 737 738 if (!VLAN_ALLOWED(adapter)) 739 return -EIO; 740 741 if (iavf_add_vlan(adapter, vid) == NULL) 742 return -ENOMEM; 743 744 set_bit(vid, adapter->vsi.active_vlans); 745 return 0; 746 } 747 748 /** 749 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 750 * @netdev: network device struct 751 * @proto: unused protocol data 752 * @vid: VLAN tag 753 **/ 754 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 755 __always_unused __be16 proto, u16 vid) 756 { 757 struct iavf_adapter *adapter = netdev_priv(netdev); 758 759 iavf_del_vlan(adapter, vid); 760 clear_bit(vid, adapter->vsi.active_vlans); 761 762 return 0; 763 } 764 765 /** 766 * iavf_find_filter - Search filter list for specific mac filter 767 * @adapter: board private structure 768 * @macaddr: the MAC address 769 * 770 * Returns ptr to the filter object or NULL. Must be called while holding the 771 * mac_vlan_list_lock. 772 **/ 773 static struct 774 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 775 const u8 *macaddr) 776 { 777 struct iavf_mac_filter *f; 778 779 if (!macaddr) 780 return NULL; 781 782 list_for_each_entry(f, &adapter->mac_filter_list, list) { 783 if (ether_addr_equal(macaddr, f->macaddr)) 784 return f; 785 } 786 return NULL; 787 } 788 789 /** 790 * iavf_add_filter - Add a mac filter to the filter list 791 * @adapter: board private structure 792 * @macaddr: the MAC address 793 * 794 * Returns ptr to the filter object or NULL when no memory available. 
795 **/ 796 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 797 const u8 *macaddr) 798 { 799 struct iavf_mac_filter *f; 800 801 if (!macaddr) 802 return NULL; 803 804 f = iavf_find_filter(adapter, macaddr); 805 if (!f) { 806 f = kzalloc(sizeof(*f), GFP_ATOMIC); 807 if (!f) 808 return f; 809 810 ether_addr_copy(f->macaddr, macaddr); 811 812 list_add_tail(&f->list, &adapter->mac_filter_list); 813 f->add = true; 814 f->is_new_mac = true; 815 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 816 } else { 817 f->remove = false; 818 } 819 820 return f; 821 } 822 823 /** 824 * iavf_set_mac - NDO callback to set port mac address 825 * @netdev: network interface device structure 826 * @p: pointer to an address structure 827 * 828 * Returns 0 on success, negative on failure 829 **/ 830 static int iavf_set_mac(struct net_device *netdev, void *p) 831 { 832 struct iavf_adapter *adapter = netdev_priv(netdev); 833 struct iavf_hw *hw = &adapter->hw; 834 struct iavf_mac_filter *f; 835 struct sockaddr *addr = p; 836 837 if (!is_valid_ether_addr(addr->sa_data)) 838 return -EADDRNOTAVAIL; 839 840 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 841 return 0; 842 843 spin_lock_bh(&adapter->mac_vlan_list_lock); 844 845 f = iavf_find_filter(adapter, hw->mac.addr); 846 if (f) { 847 f->remove = true; 848 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 849 } 850 851 f = iavf_add_filter(adapter, addr->sa_data); 852 853 spin_unlock_bh(&adapter->mac_vlan_list_lock); 854 855 if (f) { 856 ether_addr_copy(hw->mac.addr, addr->sa_data); 857 } 858 859 return (f == NULL) ? -ENOMEM : 0; 860 } 861 862 /** 863 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 864 * @netdev: the netdevice 865 * @addr: address to add 866 * 867 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 868 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 869 */ 870 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 871 { 872 struct iavf_adapter *adapter = netdev_priv(netdev); 873 874 if (iavf_add_filter(adapter, addr)) 875 return 0; 876 else 877 return -ENOMEM; 878 } 879 880 /** 881 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 882 * @netdev: the netdevice 883 * @addr: address to add 884 * 885 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 886 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 887 */ 888 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 889 { 890 struct iavf_adapter *adapter = netdev_priv(netdev); 891 struct iavf_mac_filter *f; 892 893 /* Under some circumstances, we might receive a request to delete 894 * our own device address from our uc list. Because we store the 895 * device address in the VSI's MAC/VLAN filter list, we need to ignore 896 * such requests and not delete our device address from this list. 
897 */ 898 if (ether_addr_equal(addr, netdev->dev_addr)) 899 return 0; 900 901 f = iavf_find_filter(adapter, addr); 902 if (f) { 903 f->remove = true; 904 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 905 } 906 return 0; 907 } 908 909 /** 910 * iavf_set_rx_mode - NDO callback to set the netdev filters 911 * @netdev: network interface device structure 912 **/ 913 static void iavf_set_rx_mode(struct net_device *netdev) 914 { 915 struct iavf_adapter *adapter = netdev_priv(netdev); 916 917 spin_lock_bh(&adapter->mac_vlan_list_lock); 918 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 919 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 920 spin_unlock_bh(&adapter->mac_vlan_list_lock); 921 922 if (netdev->flags & IFF_PROMISC && 923 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 924 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 925 else if (!(netdev->flags & IFF_PROMISC) && 926 adapter->flags & IAVF_FLAG_PROMISC_ON) 927 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 928 929 if (netdev->flags & IFF_ALLMULTI && 930 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 931 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 932 else if (!(netdev->flags & IFF_ALLMULTI) && 933 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 934 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 935 } 936 937 /** 938 * iavf_napi_enable_all - enable NAPI on all queue vectors 939 * @adapter: board private structure 940 **/ 941 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 942 { 943 int q_idx; 944 struct iavf_q_vector *q_vector; 945 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 946 947 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 948 struct napi_struct *napi; 949 950 q_vector = &adapter->q_vectors[q_idx]; 951 napi = &q_vector->napi; 952 napi_enable(napi); 953 } 954 } 955 956 /** 957 * iavf_napi_disable_all - disable NAPI on all queue vectors 958 * @adapter: board private structure 959 **/ 960 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 961 { 962 int q_idx; 963 struct iavf_q_vector *q_vector; 964 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 965 966 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 967 q_vector = &adapter->q_vectors[q_idx]; 968 napi_disable(&q_vector->napi); 969 } 970 } 971 972 /** 973 * iavf_configure - set up transmit and receive data structures 974 * @adapter: board private structure 975 **/ 976 static void iavf_configure(struct iavf_adapter *adapter) 977 { 978 struct net_device *netdev = adapter->netdev; 979 int i; 980 981 iavf_set_rx_mode(netdev); 982 983 iavf_configure_tx(adapter); 984 iavf_configure_rx(adapter); 985 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 986 987 for (i = 0; i < adapter->num_active_queues; i++) { 988 struct iavf_ring *ring = &adapter->rx_rings[i]; 989 990 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 991 } 992 } 993 994 /** 995 * iavf_up_complete - Finish the last steps of bringing up a connection 996 * @adapter: board private structure 997 * 998 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
999 **/ 1000 static void iavf_up_complete(struct iavf_adapter *adapter) 1001 { 1002 iavf_change_state(adapter, __IAVF_RUNNING); 1003 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1004 1005 iavf_napi_enable_all(adapter); 1006 1007 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 1008 if (CLIENT_ENABLED(adapter)) 1009 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 1010 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1011 } 1012 1013 /** 1014 * iavf_down - Shutdown the connection processing 1015 * @adapter: board private structure 1016 * 1017 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 1018 **/ 1019 void iavf_down(struct iavf_adapter *adapter) 1020 { 1021 struct net_device *netdev = adapter->netdev; 1022 struct iavf_vlan_filter *vlf; 1023 struct iavf_cloud_filter *cf; 1024 struct iavf_fdir_fltr *fdir; 1025 struct iavf_mac_filter *f; 1026 struct iavf_adv_rss *rss; 1027 1028 if (adapter->state <= __IAVF_DOWN_PENDING) 1029 return; 1030 1031 netif_carrier_off(netdev); 1032 netif_tx_disable(netdev); 1033 adapter->link_up = false; 1034 iavf_napi_disable_all(adapter); 1035 iavf_irq_disable(adapter); 1036 1037 spin_lock_bh(&adapter->mac_vlan_list_lock); 1038 1039 /* clear the sync flag on all filters */ 1040 __dev_uc_unsync(adapter->netdev, NULL); 1041 __dev_mc_unsync(adapter->netdev, NULL); 1042 1043 /* remove all MAC filters */ 1044 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1045 f->remove = true; 1046 } 1047 1048 /* remove all VLAN filters */ 1049 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1050 vlf->remove = true; 1051 } 1052 1053 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1054 1055 /* remove all cloud filters */ 1056 spin_lock_bh(&adapter->cloud_filter_list_lock); 1057 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1058 cf->del = true; 1059 } 1060 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1061 1062 /* remove all Flow Director filters */ 1063 spin_lock_bh(&adapter->fdir_fltr_lock); 1064 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1065 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1066 } 1067 spin_unlock_bh(&adapter->fdir_fltr_lock); 1068 1069 /* remove all advance RSS configuration */ 1070 spin_lock_bh(&adapter->adv_rss_lock); 1071 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1072 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1073 spin_unlock_bh(&adapter->adv_rss_lock); 1074 1075 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1076 adapter->state != __IAVF_RESETTING) { 1077 /* cancel any current operation */ 1078 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1079 /* Schedule operations to close down the HW. Don't wait 1080 * here for this to complete. The watchdog is still running 1081 * and it will take care of this. 1082 */ 1083 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1084 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1085 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1086 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1087 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1088 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1089 } 1090 1091 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1092 } 1093 1094 /** 1095 * iavf_acquire_msix_vectors - Setup the MSIX capability 1096 * @adapter: board private structure 1097 * @vectors: number of vectors to request 1098 * 1099 * Work with the OS to set up the MSIX vectors needed. 
1100 * 1101 * Returns 0 on success, negative on failure 1102 **/ 1103 static int 1104 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1105 { 1106 int err, vector_threshold; 1107 1108 /* We'll want at least 3 (vector_threshold): 1109 * 0) Other (Admin Queue and link, mostly) 1110 * 1) TxQ[0] Cleanup 1111 * 2) RxQ[0] Cleanup 1112 */ 1113 vector_threshold = MIN_MSIX_COUNT; 1114 1115 /* The more we get, the more we will assign to Tx/Rx Cleanup 1116 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1117 * Right now, we simply care about how many we'll get; we'll 1118 * set them up later while requesting irq's. 1119 */ 1120 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1121 vector_threshold, vectors); 1122 if (err < 0) { 1123 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1124 kfree(adapter->msix_entries); 1125 adapter->msix_entries = NULL; 1126 return err; 1127 } 1128 1129 /* Adjust for only the vectors we'll use, which is minimum 1130 * of max_msix_q_vectors + NONQ_VECS, or the number of 1131 * vectors we were allocated. 1132 */ 1133 adapter->num_msix_vectors = err; 1134 return 0; 1135 } 1136 1137 /** 1138 * iavf_free_queues - Free memory for all rings 1139 * @adapter: board private structure to initialize 1140 * 1141 * Free all of the memory associated with queue pairs. 1142 **/ 1143 static void iavf_free_queues(struct iavf_adapter *adapter) 1144 { 1145 if (!adapter->vsi_res) 1146 return; 1147 adapter->num_active_queues = 0; 1148 kfree(adapter->tx_rings); 1149 adapter->tx_rings = NULL; 1150 kfree(adapter->rx_rings); 1151 adapter->rx_rings = NULL; 1152 } 1153 1154 /** 1155 * iavf_alloc_queues - Allocate memory for all rings 1156 * @adapter: board private structure to initialize 1157 * 1158 * We allocate one ring per queue at run-time since we don't know the 1159 * number of queues at compile-time. The polling_netdev array is 1160 * intended for Multiqueue, but should work fine with a single queue. 1161 **/ 1162 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1163 { 1164 int i, num_active_queues; 1165 1166 /* If we're in reset reallocating queues we don't actually know yet for 1167 * certain the PF gave us the number of queues we asked for but we'll 1168 * assume it did. Once basic reset is finished we'll confirm once we 1169 * start negotiating config with PF. 
1170 */ 1171 if (adapter->num_req_queues) 1172 num_active_queues = adapter->num_req_queues; 1173 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1174 adapter->num_tc) 1175 num_active_queues = adapter->ch_config.total_qps; 1176 else 1177 num_active_queues = min_t(int, 1178 adapter->vsi_res->num_queue_pairs, 1179 (int)(num_online_cpus())); 1180 1181 1182 adapter->tx_rings = kcalloc(num_active_queues, 1183 sizeof(struct iavf_ring), GFP_KERNEL); 1184 if (!adapter->tx_rings) 1185 goto err_out; 1186 adapter->rx_rings = kcalloc(num_active_queues, 1187 sizeof(struct iavf_ring), GFP_KERNEL); 1188 if (!adapter->rx_rings) 1189 goto err_out; 1190 1191 for (i = 0; i < num_active_queues; i++) { 1192 struct iavf_ring *tx_ring; 1193 struct iavf_ring *rx_ring; 1194 1195 tx_ring = &adapter->tx_rings[i]; 1196 1197 tx_ring->queue_index = i; 1198 tx_ring->netdev = adapter->netdev; 1199 tx_ring->dev = &adapter->pdev->dev; 1200 tx_ring->count = adapter->tx_desc_count; 1201 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1202 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1203 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1204 1205 rx_ring = &adapter->rx_rings[i]; 1206 rx_ring->queue_index = i; 1207 rx_ring->netdev = adapter->netdev; 1208 rx_ring->dev = &adapter->pdev->dev; 1209 rx_ring->count = adapter->rx_desc_count; 1210 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1211 } 1212 1213 adapter->num_active_queues = num_active_queues; 1214 1215 return 0; 1216 1217 err_out: 1218 iavf_free_queues(adapter); 1219 return -ENOMEM; 1220 } 1221 1222 /** 1223 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1224 * @adapter: board private structure to initialize 1225 * 1226 * Attempt to configure the interrupts using the best available 1227 * capabilities of the hardware and the kernel. 1228 **/ 1229 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1230 { 1231 int vector, v_budget; 1232 int pairs = 0; 1233 int err = 0; 1234 1235 if (!adapter->vsi_res) { 1236 err = -EIO; 1237 goto out; 1238 } 1239 pairs = adapter->num_active_queues; 1240 1241 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1242 * us much good if we have more vectors than CPUs. However, we already 1243 * limit the total number of queues by the number of CPUs so we do not 1244 * need any further limiting here. 
1245 */ 1246 v_budget = min_t(int, pairs + NONQ_VECS, 1247 (int)adapter->vf_res->max_vectors); 1248 1249 adapter->msix_entries = kcalloc(v_budget, 1250 sizeof(struct msix_entry), GFP_KERNEL); 1251 if (!adapter->msix_entries) { 1252 err = -ENOMEM; 1253 goto out; 1254 } 1255 1256 for (vector = 0; vector < v_budget; vector++) 1257 adapter->msix_entries[vector].entry = vector; 1258 1259 err = iavf_acquire_msix_vectors(adapter, v_budget); 1260 1261 out: 1262 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1263 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1264 return err; 1265 } 1266 1267 /** 1268 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1269 * @adapter: board private structure 1270 * 1271 * Return 0 on success, negative on failure 1272 **/ 1273 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1274 { 1275 struct iavf_aqc_get_set_rss_key_data *rss_key = 1276 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1277 struct iavf_hw *hw = &adapter->hw; 1278 int ret = 0; 1279 1280 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1281 /* bail because we already have a command pending */ 1282 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1283 adapter->current_op); 1284 return -EBUSY; 1285 } 1286 1287 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1288 if (ret) { 1289 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1290 iavf_stat_str(hw, ret), 1291 iavf_aq_str(hw, hw->aq.asq_last_status)); 1292 return ret; 1293 1294 } 1295 1296 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1297 adapter->rss_lut, adapter->rss_lut_size); 1298 if (ret) { 1299 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1300 iavf_stat_str(hw, ret), 1301 iavf_aq_str(hw, hw->aq.asq_last_status)); 1302 } 1303 1304 return ret; 1305 1306 } 1307 1308 /** 1309 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1310 * @adapter: board private structure 1311 * 1312 * Returns 0 on success, negative on failure 1313 **/ 1314 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1315 { 1316 struct iavf_hw *hw = &adapter->hw; 1317 u32 *dw; 1318 u16 i; 1319 1320 dw = (u32 *)adapter->rss_key; 1321 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1322 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1323 1324 dw = (u32 *)adapter->rss_lut; 1325 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1326 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1327 1328 iavf_flush(hw); 1329 1330 return 0; 1331 } 1332 1333 /** 1334 * iavf_config_rss - Configure RSS keys and lut 1335 * @adapter: board private structure 1336 * 1337 * Returns 0 on success, negative on failure 1338 **/ 1339 int iavf_config_rss(struct iavf_adapter *adapter) 1340 { 1341 1342 if (RSS_PF(adapter)) { 1343 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1344 IAVF_FLAG_AQ_SET_RSS_KEY; 1345 return 0; 1346 } else if (RSS_AQ(adapter)) { 1347 return iavf_config_rss_aq(adapter); 1348 } else { 1349 return iavf_config_rss_reg(adapter); 1350 } 1351 } 1352 1353 /** 1354 * iavf_fill_rss_lut - Fill the lut with default values 1355 * @adapter: board private structure 1356 **/ 1357 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1358 { 1359 u16 i; 1360 1361 for (i = 0; i < adapter->rss_lut_size; i++) 1362 adapter->rss_lut[i] = i % adapter->num_active_queues; 1363 } 1364 1365 /** 1366 * iavf_init_rss - Prepare for RSS 1367 * @adapter: board private structure 1368 * 1369 * Return 0 on success, negative on failure 1370 **/ 1371 static int 
iavf_init_rss(struct iavf_adapter *adapter) 1372 { 1373 struct iavf_hw *hw = &adapter->hw; 1374 int ret; 1375 1376 if (!RSS_PF(adapter)) { 1377 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1378 if (adapter->vf_res->vf_cap_flags & 1379 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1380 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1381 else 1382 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1383 1384 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1385 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1386 } 1387 1388 iavf_fill_rss_lut(adapter); 1389 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1390 ret = iavf_config_rss(adapter); 1391 1392 return ret; 1393 } 1394 1395 /** 1396 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1397 * @adapter: board private structure to initialize 1398 * 1399 * We allocate one q_vector per queue interrupt. If allocation fails we 1400 * return -ENOMEM. 1401 **/ 1402 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1403 { 1404 int q_idx = 0, num_q_vectors; 1405 struct iavf_q_vector *q_vector; 1406 1407 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1408 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1409 GFP_KERNEL); 1410 if (!adapter->q_vectors) 1411 return -ENOMEM; 1412 1413 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1414 q_vector = &adapter->q_vectors[q_idx]; 1415 q_vector->adapter = adapter; 1416 q_vector->vsi = &adapter->vsi; 1417 q_vector->v_idx = q_idx; 1418 q_vector->reg_idx = q_idx; 1419 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1420 netif_napi_add(adapter->netdev, &q_vector->napi, 1421 iavf_napi_poll, NAPI_POLL_WEIGHT); 1422 } 1423 1424 return 0; 1425 } 1426 1427 /** 1428 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1429 * @adapter: board private structure to initialize 1430 * 1431 * This function frees the memory allocated to the q_vectors. In addition if 1432 * NAPI is enabled it will delete any references to the NAPI struct prior 1433 * to freeing the q_vector. 
1434 **/ 1435 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1436 { 1437 int q_idx, num_q_vectors; 1438 int napi_vectors; 1439 1440 if (!adapter->q_vectors) 1441 return; 1442 1443 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1444 napi_vectors = adapter->num_active_queues; 1445 1446 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1447 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1448 1449 if (q_idx < napi_vectors) 1450 netif_napi_del(&q_vector->napi); 1451 } 1452 kfree(adapter->q_vectors); 1453 adapter->q_vectors = NULL; 1454 } 1455 1456 /** 1457 * iavf_reset_interrupt_capability - Reset MSIX setup 1458 * @adapter: board private structure 1459 * 1460 **/ 1461 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1462 { 1463 if (!adapter->msix_entries) 1464 return; 1465 1466 pci_disable_msix(adapter->pdev); 1467 kfree(adapter->msix_entries); 1468 adapter->msix_entries = NULL; 1469 } 1470 1471 /** 1472 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1473 * @adapter: board private structure to initialize 1474 * 1475 **/ 1476 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1477 { 1478 int err; 1479 1480 err = iavf_alloc_queues(adapter); 1481 if (err) { 1482 dev_err(&adapter->pdev->dev, 1483 "Unable to allocate memory for queues\n"); 1484 goto err_alloc_queues; 1485 } 1486 1487 rtnl_lock(); 1488 err = iavf_set_interrupt_capability(adapter); 1489 rtnl_unlock(); 1490 if (err) { 1491 dev_err(&adapter->pdev->dev, 1492 "Unable to setup interrupt capabilities\n"); 1493 goto err_set_interrupt; 1494 } 1495 1496 err = iavf_alloc_q_vectors(adapter); 1497 if (err) { 1498 dev_err(&adapter->pdev->dev, 1499 "Unable to allocate memory for queue vectors\n"); 1500 goto err_alloc_q_vectors; 1501 } 1502 1503 /* If we've made it so far while ADq flag being ON, then we haven't 1504 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1505 * resources have been allocated in the reset path. 1506 * Now we can truly claim that ADq is enabled. 1507 */ 1508 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1509 adapter->num_tc) 1510 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1511 adapter->num_tc); 1512 1513 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1514 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1515 adapter->num_active_queues); 1516 1517 return 0; 1518 err_alloc_q_vectors: 1519 iavf_reset_interrupt_capability(adapter); 1520 err_set_interrupt: 1521 iavf_free_queues(adapter); 1522 err_alloc_queues: 1523 return err; 1524 } 1525 1526 /** 1527 * iavf_free_rss - Free memory used by RSS structs 1528 * @adapter: board private structure 1529 **/ 1530 static void iavf_free_rss(struct iavf_adapter *adapter) 1531 { 1532 kfree(adapter->rss_key); 1533 adapter->rss_key = NULL; 1534 1535 kfree(adapter->rss_lut); 1536 adapter->rss_lut = NULL; 1537 } 1538 1539 /** 1540 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1541 * @adapter: board private structure 1542 * 1543 * Returns 0 on success, negative on failure 1544 **/ 1545 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1546 { 1547 struct net_device *netdev = adapter->netdev; 1548 int err; 1549 1550 if (netif_running(netdev)) 1551 iavf_free_traffic_irqs(adapter); 1552 iavf_free_misc_irq(adapter); 1553 iavf_reset_interrupt_capability(adapter); 1554 iavf_free_q_vectors(adapter); 1555 iavf_free_queues(adapter); 1556 1557 err = iavf_init_interrupt_scheme(adapter); 1558 if (err) 1559 goto err; 1560 1561 netif_tx_stop_all_queues(netdev); 1562 1563 err = iavf_request_misc_irq(adapter); 1564 if (err) 1565 goto err; 1566 1567 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1568 1569 iavf_map_rings_to_vectors(adapter); 1570 err: 1571 return err; 1572 } 1573 1574 /** 1575 * iavf_process_aq_command - process aq_required flags 1576 * and sends aq command 1577 * @adapter: pointer to iavf adapter structure 1578 * 1579 * Returns 0 on success 1580 * Returns error code if no command was sent 1581 * or error code if the command failed. 1582 **/ 1583 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1584 { 1585 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1586 return iavf_send_vf_config_msg(adapter); 1587 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1588 iavf_disable_queues(adapter); 1589 return 0; 1590 } 1591 1592 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1593 iavf_map_queues(adapter); 1594 return 0; 1595 } 1596 1597 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1598 iavf_add_ether_addrs(adapter); 1599 return 0; 1600 } 1601 1602 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1603 iavf_add_vlans(adapter); 1604 return 0; 1605 } 1606 1607 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1608 iavf_del_ether_addrs(adapter); 1609 return 0; 1610 } 1611 1612 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1613 iavf_del_vlans(adapter); 1614 return 0; 1615 } 1616 1617 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1618 iavf_enable_vlan_stripping(adapter); 1619 return 0; 1620 } 1621 1622 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1623 iavf_disable_vlan_stripping(adapter); 1624 return 0; 1625 } 1626 1627 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1628 iavf_configure_queues(adapter); 1629 return 0; 1630 } 1631 1632 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1633 iavf_enable_queues(adapter); 1634 return 0; 1635 } 1636 1637 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1638 /* This message goes straight to the firmware, not the 1639 * PF, so we don't have to set current_op as we will 1640 * not get a response through the ARQ. 
1641 */ 1642 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 1643 return 0; 1644 } 1645 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 1646 iavf_get_hena(adapter); 1647 return 0; 1648 } 1649 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 1650 iavf_set_hena(adapter); 1651 return 0; 1652 } 1653 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 1654 iavf_set_rss_key(adapter); 1655 return 0; 1656 } 1657 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 1658 iavf_set_rss_lut(adapter); 1659 return 0; 1660 } 1661 1662 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { 1663 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | 1664 FLAG_VF_MULTICAST_PROMISC); 1665 return 0; 1666 } 1667 1668 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { 1669 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); 1670 return 0; 1671 } 1672 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || 1673 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { 1674 iavf_set_promiscuous(adapter, 0); 1675 return 0; 1676 } 1677 1678 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 1679 iavf_enable_channels(adapter); 1680 return 0; 1681 } 1682 1683 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 1684 iavf_disable_channels(adapter); 1685 return 0; 1686 } 1687 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1688 iavf_add_cloud_filter(adapter); 1689 return 0; 1690 } 1691 1692 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1693 iavf_del_cloud_filter(adapter); 1694 return 0; 1695 } 1696 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1697 iavf_del_cloud_filter(adapter); 1698 return 0; 1699 } 1700 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1701 iavf_add_cloud_filter(adapter); 1702 return 0; 1703 } 1704 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { 1705 iavf_add_fdir_filter(adapter); 1706 return IAVF_SUCCESS; 1707 } 1708 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { 1709 iavf_del_fdir_filter(adapter); 1710 return IAVF_SUCCESS; 1711 } 1712 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { 1713 iavf_add_adv_rss_cfg(adapter); 1714 return 0; 1715 } 1716 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { 1717 iavf_del_adv_rss_cfg(adapter); 1718 return 0; 1719 } 1720 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { 1721 iavf_request_stats(adapter); 1722 return 0; 1723 } 1724 1725 return -EAGAIN; 1726 } 1727 1728 /** 1729 * iavf_startup - first step of driver startup 1730 * @adapter: board private structure 1731 * 1732 * Function process __IAVF_STARTUP driver state. 
1733 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1734 * when fails the state is changed to __IAVF_INIT_FAILED 1735 **/ 1736 static void iavf_startup(struct iavf_adapter *adapter) 1737 { 1738 struct pci_dev *pdev = adapter->pdev; 1739 struct iavf_hw *hw = &adapter->hw; 1740 int err; 1741 1742 WARN_ON(adapter->state != __IAVF_STARTUP); 1743 1744 /* driver loaded, probe complete */ 1745 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1746 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1747 err = iavf_set_mac_type(hw); 1748 if (err) { 1749 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1750 goto err; 1751 } 1752 1753 err = iavf_check_reset_complete(hw); 1754 if (err) { 1755 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1756 err); 1757 goto err; 1758 } 1759 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1760 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1761 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1762 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1763 1764 err = iavf_init_adminq(hw); 1765 if (err) { 1766 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1767 goto err; 1768 } 1769 err = iavf_send_api_ver(adapter); 1770 if (err) { 1771 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1772 iavf_shutdown_adminq(hw); 1773 goto err; 1774 } 1775 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 1776 return; 1777 err: 1778 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1779 } 1780 1781 /** 1782 * iavf_init_version_check - second step of driver startup 1783 * @adapter: board private structure 1784 * 1785 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1786 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1787 * when fails the state is changed to __IAVF_INIT_FAILED 1788 **/ 1789 static void iavf_init_version_check(struct iavf_adapter *adapter) 1790 { 1791 struct pci_dev *pdev = adapter->pdev; 1792 struct iavf_hw *hw = &adapter->hw; 1793 int err = -EAGAIN; 1794 1795 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1796 1797 if (!iavf_asq_done(hw)) { 1798 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1799 iavf_shutdown_adminq(hw); 1800 iavf_change_state(adapter, __IAVF_STARTUP); 1801 goto err; 1802 } 1803 1804 /* aq msg sent, awaiting reply */ 1805 err = iavf_verify_api_ver(adapter); 1806 if (err) { 1807 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1808 err = iavf_send_api_ver(adapter); 1809 else 1810 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1811 adapter->pf_version.major, 1812 adapter->pf_version.minor, 1813 VIRTCHNL_VERSION_MAJOR, 1814 VIRTCHNL_VERSION_MINOR); 1815 goto err; 1816 } 1817 err = iavf_send_vf_config_msg(adapter); 1818 if (err) { 1819 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1820 err); 1821 goto err; 1822 } 1823 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 1824 return; 1825 err: 1826 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1827 } 1828 1829 /** 1830 * iavf_init_get_resources - third step of driver startup 1831 * @adapter: board private structure 1832 * 1833 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1834 * finishes driver initialization procedure. 
1835 * When success the state is changed to __IAVF_DOWN 1836 * when fails the state is changed to __IAVF_INIT_FAILED 1837 **/ 1838 static void iavf_init_get_resources(struct iavf_adapter *adapter) 1839 { 1840 struct net_device *netdev = adapter->netdev; 1841 struct pci_dev *pdev = adapter->pdev; 1842 struct iavf_hw *hw = &adapter->hw; 1843 int err; 1844 1845 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1846 /* aq msg sent, awaiting reply */ 1847 if (!adapter->vf_res) { 1848 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1849 GFP_KERNEL); 1850 if (!adapter->vf_res) { 1851 err = -ENOMEM; 1852 goto err; 1853 } 1854 } 1855 err = iavf_get_vf_config(adapter); 1856 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1857 err = iavf_send_vf_config_msg(adapter); 1858 goto err; 1859 } else if (err == IAVF_ERR_PARAM) { 1860 /* We only get ERR_PARAM if the device is in a very bad 1861 * state or if we've been disabled for previous bad 1862 * behavior. Either way, we're done now. 1863 */ 1864 iavf_shutdown_adminq(hw); 1865 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1866 return; 1867 } 1868 if (err) { 1869 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1870 goto err_alloc; 1871 } 1872 1873 err = iavf_process_config(adapter); 1874 if (err) 1875 goto err_alloc; 1876 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1877 1878 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1879 1880 netdev->netdev_ops = &iavf_netdev_ops; 1881 iavf_set_ethtool_ops(netdev); 1882 netdev->watchdog_timeo = 5 * HZ; 1883 1884 /* MTU range: 68 - 9710 */ 1885 netdev->min_mtu = ETH_MIN_MTU; 1886 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1887 1888 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1889 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1890 adapter->hw.mac.addr); 1891 eth_hw_addr_random(netdev); 1892 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1893 } else { 1894 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1895 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1896 } 1897 1898 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1899 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1900 err = iavf_init_interrupt_scheme(adapter); 1901 if (err) 1902 goto err_sw_init; 1903 iavf_map_rings_to_vectors(adapter); 1904 if (adapter->vf_res->vf_cap_flags & 1905 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1906 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1907 1908 err = iavf_request_misc_irq(adapter); 1909 if (err) 1910 goto err_sw_init; 1911 1912 netif_carrier_off(netdev); 1913 adapter->link_up = false; 1914 1915 /* set the semaphore to prevent any callbacks after device registration 1916 * up to time when state of driver will be set to __IAVF_DOWN 1917 */ 1918 rtnl_lock(); 1919 if (!adapter->netdev_registered) { 1920 err = register_netdevice(netdev); 1921 if (err) { 1922 rtnl_unlock(); 1923 goto err_register; 1924 } 1925 } 1926 1927 adapter->netdev_registered = true; 1928 1929 netif_tx_stop_all_queues(netdev); 1930 if (CLIENT_ALLOWED(adapter)) { 1931 err = iavf_lan_add_device(adapter); 1932 if (err) 1933 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1934 err); 1935 } 1936 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1937 if (netdev->features & NETIF_F_GRO) 1938 dev_info(&pdev->dev, "GRO is enabled\n"); 1939 1940 iavf_change_state(adapter, __IAVF_DOWN); 1941 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1942 rtnl_unlock(); 1943 1944 iavf_misc_irq_enable(adapter); 1945 
wake_up(&adapter->down_waitqueue); 1946 1947 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1948 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1949 if (!adapter->rss_key || !adapter->rss_lut) { 1950 err = -ENOMEM; 1951 goto err_mem; 1952 } 1953 if (RSS_AQ(adapter)) 1954 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1955 else 1956 iavf_init_rss(adapter); 1957 1958 return; 1959 err_mem: 1960 iavf_free_rss(adapter); 1961 err_register: 1962 iavf_free_misc_irq(adapter); 1963 err_sw_init: 1964 iavf_reset_interrupt_capability(adapter); 1965 err_alloc: 1966 kfree(adapter->vf_res); 1967 adapter->vf_res = NULL; 1968 err: 1969 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1970 } 1971 1972 /** 1973 * iavf_watchdog_task - Periodic call-back task 1974 * @work: pointer to work_struct 1975 **/ 1976 static void iavf_watchdog_task(struct work_struct *work) 1977 { 1978 struct iavf_adapter *adapter = container_of(work, 1979 struct iavf_adapter, 1980 watchdog_task.work); 1981 struct iavf_hw *hw = &adapter->hw; 1982 u32 reg_val; 1983 1984 if (!mutex_trylock(&adapter->crit_lock)) 1985 goto restart_watchdog; 1986 1987 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1988 iavf_change_state(adapter, __IAVF_COMM_FAILED); 1989 1990 if (adapter->flags & IAVF_FLAG_RESET_NEEDED && 1991 adapter->state != __IAVF_RESETTING) { 1992 iavf_change_state(adapter, __IAVF_RESETTING); 1993 adapter->aq_required = 0; 1994 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1995 } 1996 1997 switch (adapter->state) { 1998 case __IAVF_STARTUP: 1999 iavf_startup(adapter); 2000 mutex_unlock(&adapter->crit_lock); 2001 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2002 msecs_to_jiffies(30)); 2003 return; 2004 case __IAVF_INIT_VERSION_CHECK: 2005 iavf_init_version_check(adapter); 2006 mutex_unlock(&adapter->crit_lock); 2007 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2008 msecs_to_jiffies(30)); 2009 return; 2010 case __IAVF_INIT_GET_RESOURCES: 2011 iavf_init_get_resources(adapter); 2012 mutex_unlock(&adapter->crit_lock); 2013 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2014 msecs_to_jiffies(1)); 2015 return; 2016 case __IAVF_INIT_FAILED: 2017 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 2018 dev_err(&adapter->pdev->dev, 2019 "Failed to communicate with PF; waiting before retry\n"); 2020 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2021 iavf_shutdown_adminq(hw); 2022 mutex_unlock(&adapter->crit_lock); 2023 queue_delayed_work(iavf_wq, 2024 &adapter->watchdog_task, (5 * HZ)); 2025 return; 2026 } 2027 /* Try again from failed step*/ 2028 iavf_change_state(adapter, adapter->last_state); 2029 mutex_unlock(&adapter->crit_lock); 2030 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 2031 return; 2032 case __IAVF_COMM_FAILED: 2033 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2034 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2035 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2036 reg_val == VIRTCHNL_VFR_COMPLETED) { 2037 /* A chance for redemption! */ 2038 dev_err(&adapter->pdev->dev, 2039 "Hardware came out of reset. Attempting reinit.\n"); 2040 /* When init task contacts the PF and 2041 * gets everything set up again, it'll restart the 2042 * watchdog for us. Down, boy. Sit. Stay. Woof. 
2043 */ 2044 iavf_change_state(adapter, __IAVF_STARTUP); 2045 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2046 } 2047 adapter->aq_required = 0; 2048 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2049 queue_delayed_work(iavf_wq, 2050 &adapter->watchdog_task, 2051 msecs_to_jiffies(10)); 2052 return; 2053 case __IAVF_RESETTING: 2054 mutex_unlock(&adapter->crit_lock); 2055 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2056 return; 2057 case __IAVF_DOWN: 2058 case __IAVF_DOWN_PENDING: 2059 case __IAVF_TESTING: 2060 case __IAVF_RUNNING: 2061 if (adapter->current_op) { 2062 if (!iavf_asq_done(hw)) { 2063 dev_dbg(&adapter->pdev->dev, 2064 "Admin queue timeout\n"); 2065 iavf_send_api_ver(adapter); 2066 } 2067 } else { 2068 /* An error will be returned if no commands were 2069 * processed; use this opportunity to update stats 2070 */ 2071 if (iavf_process_aq_command(adapter) && 2072 adapter->state == __IAVF_RUNNING) 2073 iavf_request_stats(adapter); 2074 } 2075 if (adapter->state == __IAVF_RUNNING) 2076 iavf_detect_recover_hung(&adapter->vsi); 2077 break; 2078 case __IAVF_REMOVE: 2079 mutex_unlock(&adapter->crit_lock); 2080 return; 2081 default: 2082 return; 2083 } 2084 2085 /* check for hw reset */ 2086 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2087 if (!reg_val) { 2088 iavf_change_state(adapter, __IAVF_RESETTING); 2089 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2090 adapter->aq_required = 0; 2091 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2092 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2093 queue_work(iavf_wq, &adapter->reset_task); 2094 mutex_unlock(&adapter->crit_lock); 2095 queue_delayed_work(iavf_wq, 2096 &adapter->watchdog_task, HZ * 2); 2097 return; 2098 } 2099 2100 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2101 mutex_unlock(&adapter->crit_lock); 2102 restart_watchdog: 2103 queue_work(iavf_wq, &adapter->adminq_task); 2104 if (adapter->aq_required) 2105 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2106 msecs_to_jiffies(20)); 2107 else 2108 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2109 } 2110 2111 static void iavf_disable_vf(struct iavf_adapter *adapter) 2112 { 2113 struct iavf_mac_filter *f, *ftmp; 2114 struct iavf_vlan_filter *fv, *fvtmp; 2115 struct iavf_cloud_filter *cf, *cftmp; 2116 2117 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2118 2119 /* We don't use netif_running() because it may be true prior to 2120 * ndo_open() returning, so we can't assume it means all our open 2121 * tasks have finished, since we're not holding the rtnl_lock here. 
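 * Checking adapter->state directly is the test we rely on instead.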
2122 */ 2123 if (adapter->state == __IAVF_RUNNING) { 2124 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2125 netif_carrier_off(adapter->netdev); 2126 netif_tx_disable(adapter->netdev); 2127 adapter->link_up = false; 2128 iavf_napi_disable_all(adapter); 2129 iavf_irq_disable(adapter); 2130 iavf_free_traffic_irqs(adapter); 2131 iavf_free_all_tx_resources(adapter); 2132 iavf_free_all_rx_resources(adapter); 2133 } 2134 2135 spin_lock_bh(&adapter->mac_vlan_list_lock); 2136 2137 /* Delete all of the filters */ 2138 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2139 list_del(&f->list); 2140 kfree(f); 2141 } 2142 2143 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2144 list_del(&fv->list); 2145 kfree(fv); 2146 } 2147 2148 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2149 2150 spin_lock_bh(&adapter->cloud_filter_list_lock); 2151 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2152 list_del(&cf->list); 2153 kfree(cf); 2154 adapter->num_cloud_filters--; 2155 } 2156 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2157 2158 iavf_free_misc_irq(adapter); 2159 iavf_reset_interrupt_capability(adapter); 2160 iavf_free_q_vectors(adapter); 2161 iavf_free_queues(adapter); 2162 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2163 iavf_shutdown_adminq(&adapter->hw); 2164 adapter->netdev->flags &= ~IFF_UP; 2165 mutex_unlock(&adapter->crit_lock); 2166 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2167 iavf_change_state(adapter, __IAVF_DOWN); 2168 wake_up(&adapter->down_waitqueue); 2169 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2170 } 2171 2172 /** 2173 * iavf_reset_task - Call-back task to handle hardware reset 2174 * @work: pointer to work_struct 2175 * 2176 * During reset we need to shut down and reinitialize the admin queue 2177 * before we can use it to communicate with the PF again. We also clear 2178 * and reinit the rings because that context is lost as well. 2179 **/ 2180 static void iavf_reset_task(struct work_struct *work) 2181 { 2182 struct iavf_adapter *adapter = container_of(work, 2183 struct iavf_adapter, 2184 reset_task); 2185 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2186 struct net_device *netdev = adapter->netdev; 2187 struct iavf_hw *hw = &adapter->hw; 2188 struct iavf_mac_filter *f, *ftmp; 2189 struct iavf_cloud_filter *cf; 2190 u32 reg_val; 2191 int i = 0, err; 2192 bool running; 2193 2194 /* When device is being removed it doesn't make sense to run the reset 2195 * task, just return in such a case. 2196 */ 2197 if (mutex_is_locked(&adapter->remove_lock)) 2198 return; 2199 2200 if (iavf_lock_timeout(&adapter->crit_lock, 200)) { 2201 schedule_work(&adapter->reset_task); 2202 return; 2203 } 2204 while (!mutex_trylock(&adapter->client_lock)) 2205 usleep_range(500, 1000); 2206 if (CLIENT_ENABLED(adapter)) { 2207 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2208 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2209 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2210 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2211 cancel_delayed_work_sync(&adapter->client_task); 2212 iavf_notify_client_close(&adapter->vsi, true); 2213 } 2214 iavf_misc_irq_disable(adapter); 2215 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2216 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2217 /* Restart the AQ here. If we have been reset but didn't 2218 * detect it, or if the PF had to reinit, our AQ will be hosed. 
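 * Tear the AQ down and bring it back up, then ask the PF for a fresh reset.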
2219 */ 2220 iavf_shutdown_adminq(hw); 2221 iavf_init_adminq(hw); 2222 iavf_request_reset(adapter); 2223 } 2224 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2225 2226 /* poll until we see the reset actually happen */ 2227 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2228 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2229 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2230 if (!reg_val) 2231 break; 2232 usleep_range(5000, 10000); 2233 } 2234 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2235 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2236 goto continue_reset; /* act like the reset happened */ 2237 } 2238 2239 /* wait until the reset is complete and the PF is responding to us */ 2240 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2241 /* sleep first to make sure a minimum wait time is met */ 2242 msleep(IAVF_RESET_WAIT_MS); 2243 2244 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2245 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2246 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2247 break; 2248 } 2249 2250 pci_set_master(adapter->pdev); 2251 2252 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2253 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2254 reg_val); 2255 iavf_disable_vf(adapter); 2256 mutex_unlock(&adapter->client_lock); 2257 return; /* Do not attempt to reinit. It's dead, Jim. */ 2258 } 2259 2260 continue_reset: 2261 /* We don't use netif_running() because it may be true prior to 2262 * ndo_open() returning, so we can't assume it means all our open 2263 * tasks have finished, since we're not holding the rtnl_lock here. 2264 */ 2265 running = ((adapter->state == __IAVF_RUNNING) || 2266 (adapter->state == __IAVF_RESETTING)); 2267 2268 if (running) { 2269 netdev->flags &= ~IFF_UP; 2270 netif_carrier_off(netdev); 2271 netif_tx_stop_all_queues(netdev); 2272 adapter->link_up = false; 2273 iavf_napi_disable_all(adapter); 2274 } 2275 iavf_irq_disable(adapter); 2276 2277 iavf_change_state(adapter, __IAVF_RESETTING); 2278 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2279 2280 /* free the Tx/Rx rings and descriptors, might be better to just 2281 * re-use them sometime in the future 2282 */ 2283 iavf_free_all_rx_resources(adapter); 2284 iavf_free_all_tx_resources(adapter); 2285 2286 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2287 /* kill and reinit the admin queue */ 2288 iavf_shutdown_adminq(hw); 2289 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2290 err = iavf_init_adminq(hw); 2291 if (err) 2292 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2293 err); 2294 adapter->aq_required = 0; 2295 2296 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2297 err = iavf_reinit_interrupt_scheme(adapter); 2298 if (err) 2299 goto reset_err; 2300 } 2301 2302 if (RSS_AQ(adapter)) { 2303 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2304 } else { 2305 err = iavf_init_rss(adapter); 2306 if (err) 2307 goto reset_err; 2308 } 2309 2310 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2311 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2312 2313 spin_lock_bh(&adapter->mac_vlan_list_lock); 2314 2315 /* Delete filter for the current MAC address, it could have 2316 * been changed by the PF via administratively set MAC. 2317 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2318 */ 2319 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2320 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2321 list_del(&f->list); 2322 kfree(f); 2323 } 2324 } 2325 /* re-add all MAC filters */ 2326 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2327 f->add = true; 2328 } 2329 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2330 2331 /* check if TCs are running and re-add all cloud filters */ 2332 spin_lock_bh(&adapter->cloud_filter_list_lock); 2333 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2334 adapter->num_tc) { 2335 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2336 cf->add = true; 2337 } 2338 } 2339 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2340 2341 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2342 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2343 iavf_misc_irq_enable(adapter); 2344 2345 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2346 2347 /* We were running when the reset started, so we need to restore some 2348 * state here. 2349 */ 2350 if (running) { 2351 /* allocate transmit descriptors */ 2352 err = iavf_setup_all_tx_resources(adapter); 2353 if (err) 2354 goto reset_err; 2355 2356 /* allocate receive descriptors */ 2357 err = iavf_setup_all_rx_resources(adapter); 2358 if (err) 2359 goto reset_err; 2360 2361 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2362 err = iavf_request_traffic_irqs(adapter, netdev->name); 2363 if (err) 2364 goto reset_err; 2365 2366 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2367 } 2368 2369 iavf_configure(adapter); 2370 2371 /* iavf_up_complete() will switch device back 2372 * to __IAVF_RUNNING 2373 */ 2374 iavf_up_complete(adapter); 2375 netdev->flags |= IFF_UP; 2376 iavf_irq_enable(adapter, true); 2377 } else { 2378 iavf_change_state(adapter, __IAVF_DOWN); 2379 wake_up(&adapter->down_waitqueue); 2380 } 2381 mutex_unlock(&adapter->client_lock); 2382 mutex_unlock(&adapter->crit_lock); 2383 2384 return; 2385 reset_err: 2386 mutex_unlock(&adapter->client_lock); 2387 mutex_unlock(&adapter->crit_lock); 2388 if (running) { 2389 iavf_change_state(adapter, __IAVF_RUNNING); 2390 netdev->flags |= IFF_UP; 2391 } 2392 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2393 iavf_close(netdev); 2394 } 2395 2396 /** 2397 * iavf_adminq_task - worker thread to clean the admin queue 2398 * @work: pointer to work_struct containing our data 2399 **/ 2400 static void iavf_adminq_task(struct work_struct *work) 2401 { 2402 struct iavf_adapter *adapter = 2403 container_of(work, struct iavf_adapter, adminq_task); 2404 struct iavf_hw *hw = &adapter->hw; 2405 struct iavf_arq_event_info event; 2406 enum virtchnl_ops v_op; 2407 enum iavf_status ret, v_ret; 2408 u32 val, oldval; 2409 u16 pending; 2410 2411 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2412 goto out; 2413 2414 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2415 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2416 if (!event.msg_buf) 2417 goto out; 2418 2419 if (iavf_lock_timeout(&adapter->crit_lock, 200)) 2420 goto freedom; 2421 do { 2422 ret = iavf_clean_arq_element(hw, &event, &pending); 2423 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2424 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2425 2426 if (ret || !v_op) 2427 break; /* No event to process or error cleaning ARQ */ 2428 2429 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2430 event.msg_len); 2431 if (pending != 0) 2432 memset(event.msg_buf, 0, 
IAVF_MAX_AQ_BUF_SIZE); 2433 } while (pending); 2434 mutex_unlock(&adapter->crit_lock); 2435 2436 if ((adapter->flags & 2437 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2438 adapter->state == __IAVF_RESETTING) 2439 goto freedom; 2440 2441 /* check for error indications */ 2442 val = rd32(hw, hw->aq.arq.len); 2443 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ 2444 goto freedom; 2445 oldval = val; 2446 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2447 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2448 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2449 } 2450 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2451 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2452 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2453 } 2454 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2455 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2456 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2457 } 2458 if (oldval != val) 2459 wr32(hw, hw->aq.arq.len, val); 2460 2461 val = rd32(hw, hw->aq.asq.len); 2462 oldval = val; 2463 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2464 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2465 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2466 } 2467 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2468 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2469 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2470 } 2471 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2472 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2473 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2474 } 2475 if (oldval != val) 2476 wr32(hw, hw->aq.asq.len, val); 2477 2478 freedom: 2479 kfree(event.msg_buf); 2480 out: 2481 /* re-enable Admin queue interrupt cause */ 2482 iavf_misc_irq_enable(adapter); 2483 } 2484 2485 /** 2486 * iavf_client_task - worker thread to perform client work 2487 * @work: pointer to work_struct containing our data 2488 * 2489 * This task handles client interactions. Because client calls can be 2490 * reentrant, we can't handle them in the watchdog. 2491 **/ 2492 static void iavf_client_task(struct work_struct *work) 2493 { 2494 struct iavf_adapter *adapter = 2495 container_of(work, struct iavf_adapter, client_task.work); 2496 2497 /* If we can't get the client bit, just give up. We'll be rescheduled 2498 * later. 
2499 */ 2500 2501 if (!mutex_trylock(&adapter->client_lock)) 2502 return; 2503 2504 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2505 iavf_client_subtask(adapter); 2506 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2507 goto out; 2508 } 2509 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2510 iavf_notify_client_l2_params(&adapter->vsi); 2511 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2512 goto out; 2513 } 2514 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2515 iavf_notify_client_close(&adapter->vsi, false); 2516 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2517 goto out; 2518 } 2519 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2520 iavf_notify_client_open(&adapter->vsi); 2521 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2522 } 2523 out: 2524 mutex_unlock(&adapter->client_lock); 2525 } 2526 2527 /** 2528 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2529 * @adapter: board private structure 2530 * 2531 * Free all transmit software resources 2532 **/ 2533 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2534 { 2535 int i; 2536 2537 if (!adapter->tx_rings) 2538 return; 2539 2540 for (i = 0; i < adapter->num_active_queues; i++) 2541 if (adapter->tx_rings[i].desc) 2542 iavf_free_tx_resources(&adapter->tx_rings[i]); 2543 } 2544 2545 /** 2546 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2547 * @adapter: board private structure 2548 * 2549 * If this function returns with an error, then it's possible one or 2550 * more of the rings is populated (while the rest are not). It is the 2551 * callers duty to clean those orphaned rings. 2552 * 2553 * Return 0 on success, negative on failure 2554 **/ 2555 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2556 { 2557 int i, err = 0; 2558 2559 for (i = 0; i < adapter->num_active_queues; i++) { 2560 adapter->tx_rings[i].count = adapter->tx_desc_count; 2561 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2562 if (!err) 2563 continue; 2564 dev_err(&adapter->pdev->dev, 2565 "Allocation for Tx Queue %u failed\n", i); 2566 break; 2567 } 2568 2569 return err; 2570 } 2571 2572 /** 2573 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2574 * @adapter: board private structure 2575 * 2576 * If this function returns with an error, then it's possible one or 2577 * more of the rings is populated (while the rest are not). It is the 2578 * callers duty to clean those orphaned rings. 
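 * The ring size used here is adapter->rx_desc_count, which can be changed
 * at runtime with ethtool -G.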
2579 * 2580 * Return 0 on success, negative on failure 2581 **/ 2582 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2583 { 2584 int i, err = 0; 2585 2586 for (i = 0; i < adapter->num_active_queues; i++) { 2587 adapter->rx_rings[i].count = adapter->rx_desc_count; 2588 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2589 if (!err) 2590 continue; 2591 dev_err(&adapter->pdev->dev, 2592 "Allocation for Rx Queue %u failed\n", i); 2593 break; 2594 } 2595 return err; 2596 } 2597 2598 /** 2599 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2600 * @adapter: board private structure 2601 * 2602 * Free all receive software resources 2603 **/ 2604 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2605 { 2606 int i; 2607 2608 if (!adapter->rx_rings) 2609 return; 2610 2611 for (i = 0; i < adapter->num_active_queues; i++) 2612 if (adapter->rx_rings[i].desc) 2613 iavf_free_rx_resources(&adapter->rx_rings[i]); 2614 } 2615 2616 /** 2617 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2618 * @adapter: board private structure 2619 * @max_tx_rate: max Tx bw for a tc 2620 **/ 2621 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2622 u64 max_tx_rate) 2623 { 2624 int speed = 0, ret = 0; 2625 2626 if (ADV_LINK_SUPPORT(adapter)) { 2627 if (adapter->link_speed_mbps < U32_MAX) { 2628 speed = adapter->link_speed_mbps; 2629 goto validate_bw; 2630 } else { 2631 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2632 return -EINVAL; 2633 } 2634 } 2635 2636 switch (adapter->link_speed) { 2637 case VIRTCHNL_LINK_SPEED_40GB: 2638 speed = SPEED_40000; 2639 break; 2640 case VIRTCHNL_LINK_SPEED_25GB: 2641 speed = SPEED_25000; 2642 break; 2643 case VIRTCHNL_LINK_SPEED_20GB: 2644 speed = SPEED_20000; 2645 break; 2646 case VIRTCHNL_LINK_SPEED_10GB: 2647 speed = SPEED_10000; 2648 break; 2649 case VIRTCHNL_LINK_SPEED_5GB: 2650 speed = SPEED_5000; 2651 break; 2652 case VIRTCHNL_LINK_SPEED_2_5GB: 2653 speed = SPEED_2500; 2654 break; 2655 case VIRTCHNL_LINK_SPEED_1GB: 2656 speed = SPEED_1000; 2657 break; 2658 case VIRTCHNL_LINK_SPEED_100MB: 2659 speed = SPEED_100; 2660 break; 2661 default: 2662 break; 2663 } 2664 2665 validate_bw: 2666 if (max_tx_rate > speed) { 2667 dev_err(&adapter->pdev->dev, 2668 "Invalid tx rate specified\n"); 2669 ret = -EINVAL; 2670 } 2671 2672 return ret; 2673 } 2674 2675 /** 2676 * iavf_validate_ch_config - validate queue mapping info 2677 * @adapter: board private structure 2678 * @mqprio_qopt: queue parameters 2679 * 2680 * This function validates if the config provided by the user to 2681 * configure queue channels is valid or not. Returns 0 on a valid 2682 * config. 
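 * Each TC must have a non-zero queue count, the per-TC queue offsets must
 * be contiguous, min_rate is not supported, and the summed max_rate may
 * not exceed the negotiated link speed.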
2683 **/ 2684 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2685 struct tc_mqprio_qopt_offload *mqprio_qopt) 2686 { 2687 u64 total_max_rate = 0; 2688 int i, num_qps = 0; 2689 u64 tx_rate = 0; 2690 int ret = 0; 2691 2692 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2693 mqprio_qopt->qopt.num_tc < 1) 2694 return -EINVAL; 2695 2696 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2697 if (!mqprio_qopt->qopt.count[i] || 2698 mqprio_qopt->qopt.offset[i] != num_qps) 2699 return -EINVAL; 2700 if (mqprio_qopt->min_rate[i]) { 2701 dev_err(&adapter->pdev->dev, 2702 "Invalid min tx rate (greater than 0) specified\n"); 2703 return -EINVAL; 2704 } 2705 /*convert to Mbps */ 2706 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2707 IAVF_MBPS_DIVISOR); 2708 total_max_rate += tx_rate; 2709 num_qps += mqprio_qopt->qopt.count[i]; 2710 } 2711 if (num_qps > IAVF_MAX_REQ_QUEUES) 2712 return -EINVAL; 2713 2714 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2715 return ret; 2716 } 2717 2718 /** 2719 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2720 * @adapter: board private structure 2721 **/ 2722 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2723 { 2724 struct iavf_cloud_filter *cf, *cftmp; 2725 2726 spin_lock_bh(&adapter->cloud_filter_list_lock); 2727 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2728 list) { 2729 list_del(&cf->list); 2730 kfree(cf); 2731 adapter->num_cloud_filters--; 2732 } 2733 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2734 } 2735 2736 /** 2737 * __iavf_setup_tc - configure multiple traffic classes 2738 * @netdev: network interface device structure 2739 * @type_data: tc offload data 2740 * 2741 * This function processes the config information provided by the 2742 * user to configure traffic classes/queue channels and packages the 2743 * information to request the PF to setup traffic classes. 2744 * 2745 * Returns 0 on success. 
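 *
 * For reference, channels are typically requested from user space with an
 * mqprio qdisc in channel mode, e.g. (illustrative command only):
 *
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *          queues 4@0 4@4 hw 1 mode channel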
2746 **/ 2747 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2748 { 2749 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2750 struct iavf_adapter *adapter = netdev_priv(netdev); 2751 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2752 u8 num_tc = 0, total_qps = 0; 2753 int ret = 0, netdev_tc = 0; 2754 u64 max_tx_rate; 2755 u16 mode; 2756 int i; 2757 2758 num_tc = mqprio_qopt->qopt.num_tc; 2759 mode = mqprio_qopt->mode; 2760 2761 /* delete queue_channel */ 2762 if (!mqprio_qopt->qopt.hw) { 2763 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2764 /* reset the tc configuration */ 2765 netdev_reset_tc(netdev); 2766 adapter->num_tc = 0; 2767 netif_tx_stop_all_queues(netdev); 2768 netif_tx_disable(netdev); 2769 iavf_del_all_cloud_filters(adapter); 2770 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2771 goto exit; 2772 } else { 2773 return -EINVAL; 2774 } 2775 } 2776 2777 /* add queue channel */ 2778 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2779 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2780 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2781 return -EOPNOTSUPP; 2782 } 2783 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2784 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2785 return -EINVAL; 2786 } 2787 2788 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2789 if (ret) 2790 return ret; 2791 /* Return if same TC config is requested */ 2792 if (adapter->num_tc == num_tc) 2793 return 0; 2794 adapter->num_tc = num_tc; 2795 2796 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2797 if (i < num_tc) { 2798 adapter->ch_config.ch_info[i].count = 2799 mqprio_qopt->qopt.count[i]; 2800 adapter->ch_config.ch_info[i].offset = 2801 mqprio_qopt->qopt.offset[i]; 2802 total_qps += mqprio_qopt->qopt.count[i]; 2803 max_tx_rate = mqprio_qopt->max_rate[i]; 2804 /* convert to Mbps */ 2805 max_tx_rate = div_u64(max_tx_rate, 2806 IAVF_MBPS_DIVISOR); 2807 adapter->ch_config.ch_info[i].max_tx_rate = 2808 max_tx_rate; 2809 } else { 2810 adapter->ch_config.ch_info[i].count = 1; 2811 adapter->ch_config.ch_info[i].offset = 0; 2812 } 2813 } 2814 adapter->ch_config.total_qps = total_qps; 2815 netif_tx_stop_all_queues(netdev); 2816 netif_tx_disable(netdev); 2817 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2818 netdev_reset_tc(netdev); 2819 /* Report the tc mapping up the stack */ 2820 netdev_set_num_tc(adapter->netdev, num_tc); 2821 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2822 u16 qcount = mqprio_qopt->qopt.count[i]; 2823 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2824 2825 if (i < num_tc) 2826 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2827 qoffset); 2828 } 2829 } 2830 exit: 2831 return ret; 2832 } 2833 2834 /** 2835 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2836 * @adapter: board private structure 2837 * @f: pointer to struct flow_cls_offload 2838 * @filter: pointer to cloud filter structure 2839 */ 2840 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2841 struct flow_cls_offload *f, 2842 struct iavf_cloud_filter *filter) 2843 { 2844 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2845 struct flow_dissector *dissector = rule->match.dissector; 2846 u16 n_proto_mask = 0; 2847 u16 n_proto_key = 0; 2848 u8 field_flags = 0; 2849 u16 addr_type = 0; 2850 u16 n_proto = 0; 2851 int i = 0; 2852 struct virtchnl_filter *vf = &filter->f; 2853 2854 if (dissector->used_keys & 2855 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2856 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2857 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2858 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2859 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2860 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2861 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2862 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2863 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2864 dissector->used_keys); 2865 return -EOPNOTSUPP; 2866 } 2867 2868 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2869 struct flow_match_enc_keyid match; 2870 2871 flow_rule_match_enc_keyid(rule, &match); 2872 if (match.mask->keyid != 0) 2873 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2874 } 2875 2876 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2877 struct flow_match_basic match; 2878 2879 flow_rule_match_basic(rule, &match); 2880 n_proto_key = ntohs(match.key->n_proto); 2881 n_proto_mask = ntohs(match.mask->n_proto); 2882 2883 if (n_proto_key == ETH_P_ALL) { 2884 n_proto_key = 0; 2885 n_proto_mask = 0; 2886 } 2887 n_proto = n_proto_key & n_proto_mask; 2888 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2889 return -EINVAL; 2890 if (n_proto == ETH_P_IPV6) { 2891 /* specify flow type as TCP IPv6 */ 2892 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2893 } 2894 2895 if (match.key->ip_proto != IPPROTO_TCP) { 2896 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2897 return -EINVAL; 2898 } 2899 } 2900 2901 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2902 struct flow_match_eth_addrs match; 2903 2904 flow_rule_match_eth_addrs(rule, &match); 2905 2906 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2907 if (!is_zero_ether_addr(match.mask->dst)) { 2908 if (is_broadcast_ether_addr(match.mask->dst)) { 2909 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2910 } else { 2911 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2912 match.mask->dst); 2913 return IAVF_ERR_CONFIG; 2914 } 2915 } 2916 2917 if (!is_zero_ether_addr(match.mask->src)) { 2918 if (is_broadcast_ether_addr(match.mask->src)) { 2919 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2920 } else { 2921 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2922 match.mask->src); 2923 return IAVF_ERR_CONFIG; 2924 } 2925 } 2926 2927 if (!is_zero_ether_addr(match.key->dst)) 2928 if (is_valid_ether_addr(match.key->dst) || 2929 is_multicast_ether_addr(match.key->dst)) { 2930 /* set the mask if a valid dst_mac address */ 2931 for (i = 0; i < ETH_ALEN; i++) 2932 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2933 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2934 match.key->dst); 2935 } 2936 2937 if (!is_zero_ether_addr(match.key->src)) 2938 if (is_valid_ether_addr(match.key->src) || 2939 is_multicast_ether_addr(match.key->src)) { 2940 /* set the mask if a valid dst_mac address */ 2941 for (i = 0; i < ETH_ALEN; i++) 2942 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2943 ether_addr_copy(vf->data.tcp_spec.src_mac, 2944 match.key->src); 2945 } 2946 } 2947 2948 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2949 struct flow_match_vlan match; 2950 2951 flow_rule_match_vlan(rule, &match); 2952 if (match.mask->vlan_id) { 2953 if (match.mask->vlan_id == VLAN_VID_MASK) { 2954 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2955 } else { 2956 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2957 match.mask->vlan_id); 2958 return IAVF_ERR_CONFIG; 2959 } 2960 } 2961 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2962 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2963 } 2964 2965 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2966 struct flow_match_control match; 2967 2968 
flow_rule_match_control(rule, &match); 2969 addr_type = match.key->addr_type; 2970 } 2971 2972 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2973 struct flow_match_ipv4_addrs match; 2974 2975 flow_rule_match_ipv4_addrs(rule, &match); 2976 if (match.mask->dst) { 2977 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2978 field_flags |= IAVF_CLOUD_FIELD_IIP; 2979 } else { 2980 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2981 be32_to_cpu(match.mask->dst)); 2982 return IAVF_ERR_CONFIG; 2983 } 2984 } 2985 2986 if (match.mask->src) { 2987 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2988 field_flags |= IAVF_CLOUD_FIELD_IIP; 2989 } else { 2990 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2991 be32_to_cpu(match.mask->dst)); 2992 return IAVF_ERR_CONFIG; 2993 } 2994 } 2995 2996 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 2997 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2998 return IAVF_ERR_CONFIG; 2999 } 3000 if (match.key->dst) { 3001 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 3002 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 3003 } 3004 if (match.key->src) { 3005 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 3006 vf->data.tcp_spec.src_ip[0] = match.key->src; 3007 } 3008 } 3009 3010 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 3011 struct flow_match_ipv6_addrs match; 3012 3013 flow_rule_match_ipv6_addrs(rule, &match); 3014 3015 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 3016 if (ipv6_addr_any(&match.mask->dst)) { 3017 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 3018 IPV6_ADDR_ANY); 3019 return IAVF_ERR_CONFIG; 3020 } 3021 3022 /* src and dest IPv6 address should not be LOOPBACK 3023 * (0:0:0:0:0:0:0:1) which can be represented as ::1 3024 */ 3025 if (ipv6_addr_loopback(&match.key->dst) || 3026 ipv6_addr_loopback(&match.key->src)) { 3027 dev_err(&adapter->pdev->dev, 3028 "ipv6 addr should not be loopback\n"); 3029 return IAVF_ERR_CONFIG; 3030 } 3031 if (!ipv6_addr_any(&match.mask->dst) || 3032 !ipv6_addr_any(&match.mask->src)) 3033 field_flags |= IAVF_CLOUD_FIELD_IIP; 3034 3035 for (i = 0; i < 4; i++) 3036 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 3037 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 3038 sizeof(vf->data.tcp_spec.dst_ip)); 3039 for (i = 0; i < 4; i++) 3040 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 3041 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 3042 sizeof(vf->data.tcp_spec.src_ip)); 3043 } 3044 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 3045 struct flow_match_ports match; 3046 3047 flow_rule_match_ports(rule, &match); 3048 if (match.mask->src) { 3049 if (match.mask->src == cpu_to_be16(0xffff)) { 3050 field_flags |= IAVF_CLOUD_FIELD_IIP; 3051 } else { 3052 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 3053 be16_to_cpu(match.mask->src)); 3054 return IAVF_ERR_CONFIG; 3055 } 3056 } 3057 3058 if (match.mask->dst) { 3059 if (match.mask->dst == cpu_to_be16(0xffff)) { 3060 field_flags |= IAVF_CLOUD_FIELD_IIP; 3061 } else { 3062 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 3063 be16_to_cpu(match.mask->dst)); 3064 return IAVF_ERR_CONFIG; 3065 } 3066 } 3067 if (match.key->dst) { 3068 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 3069 vf->data.tcp_spec.dst_port = match.key->dst; 3070 } 3071 3072 if (match.key->src) { 3073 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 3074 vf->data.tcp_spec.src_port = match.key->src; 3075 } 3076 } 3077 vf->field_flags = field_flags; 3078 3079 
return 0; 3080 } 3081 3082 /** 3083 * iavf_handle_tclass - Forward to a traffic class on the device 3084 * @adapter: board private structure 3085 * @tc: traffic class index on the device 3086 * @filter: pointer to cloud filter structure 3087 */ 3088 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3089 struct iavf_cloud_filter *filter) 3090 { 3091 if (tc == 0) 3092 return 0; 3093 if (tc < adapter->num_tc) { 3094 if (!filter->f.data.tcp_spec.dst_port) { 3095 dev_err(&adapter->pdev->dev, 3096 "Specify destination port to redirect to traffic class other than TC0\n"); 3097 return -EINVAL; 3098 } 3099 } 3100 /* redirect to a traffic class on the same device */ 3101 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3102 filter->f.action_meta = tc; 3103 return 0; 3104 } 3105 3106 /** 3107 * iavf_configure_clsflower - Add tc flower filters 3108 * @adapter: board private structure 3109 * @cls_flower: Pointer to struct flow_cls_offload 3110 */ 3111 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3112 struct flow_cls_offload *cls_flower) 3113 { 3114 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3115 struct iavf_cloud_filter *filter = NULL; 3116 int err = -EINVAL, count = 50; 3117 3118 if (tc < 0) { 3119 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3120 return -EINVAL; 3121 } 3122 3123 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3124 if (!filter) 3125 return -ENOMEM; 3126 3127 while (!mutex_trylock(&adapter->crit_lock)) { 3128 if (--count == 0) { 3129 kfree(filter); 3130 return err; 3131 } 3132 udelay(1); 3133 } 3134 3135 filter->cookie = cls_flower->cookie; 3136 3137 /* set the mask to all zeroes to begin with */ 3138 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3139 /* start out with flow type and eth type IPv4 to begin with */ 3140 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3141 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3142 if (err) 3143 goto err; 3144 3145 err = iavf_handle_tclass(adapter, tc, filter); 3146 if (err) 3147 goto err; 3148 3149 /* add filter to the list */ 3150 spin_lock_bh(&adapter->cloud_filter_list_lock); 3151 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3152 adapter->num_cloud_filters++; 3153 filter->add = true; 3154 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3155 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3156 err: 3157 if (err) 3158 kfree(filter); 3159 3160 mutex_unlock(&adapter->crit_lock); 3161 return err; 3162 } 3163 3164 /* iavf_find_cf - Find the cloud filter in the list 3165 * @adapter: Board private structure 3166 * @cookie: filter specific cookie 3167 * 3168 * Returns ptr to the filter object or NULL. Must be called while holding the 3169 * cloud_filter_list_lock. 
3170 */ 3171 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3172 unsigned long *cookie) 3173 { 3174 struct iavf_cloud_filter *filter = NULL; 3175 3176 if (!cookie) 3177 return NULL; 3178 3179 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3180 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3181 return filter; 3182 } 3183 return NULL; 3184 } 3185 3186 /** 3187 * iavf_delete_clsflower - Remove tc flower filters 3188 * @adapter: board private structure 3189 * @cls_flower: Pointer to struct flow_cls_offload 3190 */ 3191 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3192 struct flow_cls_offload *cls_flower) 3193 { 3194 struct iavf_cloud_filter *filter = NULL; 3195 int err = 0; 3196 3197 spin_lock_bh(&adapter->cloud_filter_list_lock); 3198 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3199 if (filter) { 3200 filter->del = true; 3201 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3202 } else { 3203 err = -EINVAL; 3204 } 3205 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3206 3207 return err; 3208 } 3209 3210 /** 3211 * iavf_setup_tc_cls_flower - flower classifier offloads 3212 * @adapter: board private structure 3213 * @cls_flower: pointer to flow_cls_offload struct with flow info 3214 */ 3215 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3216 struct flow_cls_offload *cls_flower) 3217 { 3218 switch (cls_flower->command) { 3219 case FLOW_CLS_REPLACE: 3220 return iavf_configure_clsflower(adapter, cls_flower); 3221 case FLOW_CLS_DESTROY: 3222 return iavf_delete_clsflower(adapter, cls_flower); 3223 case FLOW_CLS_STATS: 3224 return -EOPNOTSUPP; 3225 default: 3226 return -EOPNOTSUPP; 3227 } 3228 } 3229 3230 /** 3231 * iavf_setup_tc_block_cb - block callback for tc 3232 * @type: type of offload 3233 * @type_data: offload data 3234 * @cb_priv: 3235 * 3236 * This function is the block callback for traffic classes 3237 **/ 3238 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3239 void *cb_priv) 3240 { 3241 struct iavf_adapter *adapter = cb_priv; 3242 3243 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3244 return -EOPNOTSUPP; 3245 3246 switch (type) { 3247 case TC_SETUP_CLSFLOWER: 3248 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3249 default: 3250 return -EOPNOTSUPP; 3251 } 3252 } 3253 3254 static LIST_HEAD(iavf_block_cb_list); 3255 3256 /** 3257 * iavf_setup_tc - configure multiple traffic classes 3258 * @netdev: network interface device structure 3259 * @type: type of offload 3260 * @type_data: tc offload data 3261 * 3262 * This function is the callback to ndo_setup_tc in the 3263 * netdev_ops. 3264 * 3265 * Returns 0 on success 3266 **/ 3267 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3268 void *type_data) 3269 { 3270 struct iavf_adapter *adapter = netdev_priv(netdev); 3271 3272 switch (type) { 3273 case TC_SETUP_QDISC_MQPRIO: 3274 return __iavf_setup_tc(netdev, type_data); 3275 case TC_SETUP_BLOCK: 3276 return flow_block_cb_setup_simple(type_data, 3277 &iavf_block_cb_list, 3278 iavf_setup_tc_block_cb, 3279 adapter, adapter, true); 3280 default: 3281 return -EOPNOTSUPP; 3282 } 3283 } 3284 3285 /** 3286 * iavf_open - Called when a network interface is made active 3287 * @netdev: network interface device structure 3288 * 3289 * Returns 0 on success, negative value on failure 3290 * 3291 * The open entry point is called when a network interface is made 3292 * active by the system (IFF_UP). 
At this point all resources needed 3293 * for transmit and receive operations are allocated, the interrupt 3294 * handler is registered with the OS, the watchdog is started, 3295 * and the stack is notified that the interface is ready. 3296 **/ 3297 static int iavf_open(struct net_device *netdev) 3298 { 3299 struct iavf_adapter *adapter = netdev_priv(netdev); 3300 int err; 3301 3302 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3303 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3304 return -EIO; 3305 } 3306 3307 while (!mutex_trylock(&adapter->crit_lock)) 3308 usleep_range(500, 1000); 3309 3310 if (adapter->state != __IAVF_DOWN) { 3311 err = -EBUSY; 3312 goto err_unlock; 3313 } 3314 3315 if (adapter->state == __IAVF_RUNNING && 3316 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 3317 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 3318 err = 0; 3319 goto err_unlock; 3320 } 3321 3322 /* allocate transmit descriptors */ 3323 err = iavf_setup_all_tx_resources(adapter); 3324 if (err) 3325 goto err_setup_tx; 3326 3327 /* allocate receive descriptors */ 3328 err = iavf_setup_all_rx_resources(adapter); 3329 if (err) 3330 goto err_setup_rx; 3331 3332 /* clear any pending interrupts, may auto mask */ 3333 err = iavf_request_traffic_irqs(adapter, netdev->name); 3334 if (err) 3335 goto err_req_irq; 3336 3337 spin_lock_bh(&adapter->mac_vlan_list_lock); 3338 3339 iavf_add_filter(adapter, adapter->hw.mac.addr); 3340 3341 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3342 3343 /* Restore VLAN filters that were removed with IFF_DOWN */ 3344 iavf_restore_filters(adapter); 3345 3346 iavf_configure(adapter); 3347 3348 iavf_up_complete(adapter); 3349 3350 iavf_irq_enable(adapter, true); 3351 3352 mutex_unlock(&adapter->crit_lock); 3353 3354 return 0; 3355 3356 err_req_irq: 3357 iavf_down(adapter); 3358 iavf_free_traffic_irqs(adapter); 3359 err_setup_rx: 3360 iavf_free_all_rx_resources(adapter); 3361 err_setup_tx: 3362 iavf_free_all_tx_resources(adapter); 3363 err_unlock: 3364 mutex_unlock(&adapter->crit_lock); 3365 3366 return err; 3367 } 3368 3369 /** 3370 * iavf_close - Disables a network interface 3371 * @netdev: network interface device structure 3372 * 3373 * Returns 0, this is not allowed to fail 3374 * 3375 * The close entry point is called when an interface is de-activated 3376 * by the OS. The hardware is still under the drivers control, but 3377 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3378 * are freed, along with all transmit and receive resources. 3379 **/ 3380 static int iavf_close(struct net_device *netdev) 3381 { 3382 struct iavf_adapter *adapter = netdev_priv(netdev); 3383 int status; 3384 3385 if (adapter->state <= __IAVF_DOWN_PENDING) 3386 return 0; 3387 3388 while (!mutex_trylock(&adapter->crit_lock)) 3389 usleep_range(500, 1000); 3390 3391 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3392 if (CLIENT_ENABLED(adapter)) 3393 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3394 3395 iavf_down(adapter); 3396 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 3397 iavf_free_traffic_irqs(adapter); 3398 3399 mutex_unlock(&adapter->crit_lock); 3400 3401 /* We explicitly don't free resources here because the hardware is 3402 * still active and can DMA into memory. Resources are cleared in 3403 * iavf_virtchnl_completion() after we get confirmation from the PF 3404 * driver that the rings have been stopped. 3405 * 3406 * Also, we wait for state to transition to __IAVF_DOWN before 3407 * returning. 
State change occurs in iavf_virtchnl_completion() after 3408 * VF resources are released (which occurs after PF driver processes and 3409 * responds to admin queue commands). 3410 */ 3411 3412 status = wait_event_timeout(adapter->down_waitqueue, 3413 adapter->state == __IAVF_DOWN, 3414 msecs_to_jiffies(500)); 3415 if (!status) 3416 netdev_warn(netdev, "Device resources not yet released\n"); 3417 return 0; 3418 } 3419 3420 /** 3421 * iavf_change_mtu - Change the Maximum Transfer Unit 3422 * @netdev: network interface device structure 3423 * @new_mtu: new value for maximum frame size 3424 * 3425 * Returns 0 on success, negative on failure 3426 **/ 3427 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3428 { 3429 struct iavf_adapter *adapter = netdev_priv(netdev); 3430 3431 netdev->mtu = new_mtu; 3432 if (CLIENT_ENABLED(adapter)) { 3433 iavf_notify_client_l2_params(&adapter->vsi); 3434 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3435 } 3436 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3437 queue_work(iavf_wq, &adapter->reset_task); 3438 3439 return 0; 3440 } 3441 3442 /** 3443 * iavf_set_features - set the netdev feature flags 3444 * @netdev: ptr to the netdev being adjusted 3445 * @features: the feature set that the stack is suggesting 3446 * Note: expects to be called while under rtnl_lock() 3447 **/ 3448 static int iavf_set_features(struct net_device *netdev, 3449 netdev_features_t features) 3450 { 3451 struct iavf_adapter *adapter = netdev_priv(netdev); 3452 3453 /* Don't allow enabling VLAN features when adapter is not capable 3454 * of VLAN offload/filtering 3455 */ 3456 if (!VLAN_ALLOWED(adapter)) { 3457 netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 3458 NETIF_F_HW_VLAN_CTAG_TX | 3459 NETIF_F_HW_VLAN_CTAG_FILTER); 3460 if (features & (NETIF_F_HW_VLAN_CTAG_RX | 3461 NETIF_F_HW_VLAN_CTAG_TX | 3462 NETIF_F_HW_VLAN_CTAG_FILTER)) 3463 return -EINVAL; 3464 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3465 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3466 adapter->aq_required |= 3467 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3468 else 3469 adapter->aq_required |= 3470 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3471 } 3472 3473 return 0; 3474 } 3475 3476 /** 3477 * iavf_features_check - Validate encapsulated packet conforms to limits 3478 * @skb: skb buff 3479 * @dev: This physical port's netdev 3480 * @features: Offload features that the stack believes apply 3481 **/ 3482 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3483 struct net_device *dev, 3484 netdev_features_t features) 3485 { 3486 size_t len; 3487 3488 /* No point in doing any of this if neither checksum nor GSO are 3489 * being requested for this frame. We can rule out both by just 3490 * checking for CHECKSUM_PARTIAL 3491 */ 3492 if (skb->ip_summed != CHECKSUM_PARTIAL) 3493 return features; 3494 3495 /* We cannot support GSO if the MSS is going to be less than 3496 * 64 bytes. If it is then we need to drop support for GSO. 
3497 */ 3498 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3499 features &= ~NETIF_F_GSO_MASK; 3500 3501 /* MACLEN can support at most 63 words */ 3502 len = skb_network_header(skb) - skb->data; 3503 if (len & ~(63 * 2)) 3504 goto out_err; 3505 3506 /* IPLEN and EIPLEN can support at most 127 dwords */ 3507 len = skb_transport_header(skb) - skb_network_header(skb); 3508 if (len & ~(127 * 4)) 3509 goto out_err; 3510 3511 if (skb->encapsulation) { 3512 /* L4TUNLEN can support 127 words */ 3513 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3514 if (len & ~(127 * 2)) 3515 goto out_err; 3516 3517 /* IPLEN can support at most 127 dwords */ 3518 len = skb_inner_transport_header(skb) - 3519 skb_inner_network_header(skb); 3520 if (len & ~(127 * 4)) 3521 goto out_err; 3522 } 3523 3524 /* No need to validate L4LEN as TCP is the only protocol with a 3525 * a flexible value and we support all possible values supported 3526 * by TCP, which is at most 15 dwords 3527 */ 3528 3529 return features; 3530 out_err: 3531 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3532 } 3533 3534 /** 3535 * iavf_fix_features - fix up the netdev feature bits 3536 * @netdev: our net device 3537 * @features: desired feature bits 3538 * 3539 * Returns fixed-up features bits 3540 **/ 3541 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3542 netdev_features_t features) 3543 { 3544 struct iavf_adapter *adapter = netdev_priv(netdev); 3545 3546 if (adapter->vf_res && 3547 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3548 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3549 NETIF_F_HW_VLAN_CTAG_RX | 3550 NETIF_F_HW_VLAN_CTAG_FILTER); 3551 3552 return features; 3553 } 3554 3555 static const struct net_device_ops iavf_netdev_ops = { 3556 .ndo_open = iavf_open, 3557 .ndo_stop = iavf_close, 3558 .ndo_start_xmit = iavf_xmit_frame, 3559 .ndo_set_rx_mode = iavf_set_rx_mode, 3560 .ndo_validate_addr = eth_validate_addr, 3561 .ndo_set_mac_address = iavf_set_mac, 3562 .ndo_change_mtu = iavf_change_mtu, 3563 .ndo_tx_timeout = iavf_tx_timeout, 3564 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3565 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3566 .ndo_features_check = iavf_features_check, 3567 .ndo_fix_features = iavf_fix_features, 3568 .ndo_set_features = iavf_set_features, 3569 .ndo_setup_tc = iavf_setup_tc, 3570 }; 3571 3572 /** 3573 * iavf_check_reset_complete - check that VF reset is complete 3574 * @hw: pointer to hw struct 3575 * 3576 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3577 **/ 3578 static int iavf_check_reset_complete(struct iavf_hw *hw) 3579 { 3580 u32 rstat; 3581 int i; 3582 3583 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3584 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3585 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3586 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3587 (rstat == VIRTCHNL_VFR_COMPLETED)) 3588 return 0; 3589 usleep_range(10, 20); 3590 } 3591 return -EBUSY; 3592 } 3593 3594 /** 3595 * iavf_process_config - Process the config information we got from the PF 3596 * @adapter: board private structure 3597 * 3598 * Verify that we have a valid config struct, and set up our netdev features 3599 * and our VSI struct. 
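 * If the PF granted fewer queue pairs than we requested, a reset is
 * scheduled so that the driver falls back to the number of queues the PF
 * actually provided.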
3600 **/ 3601 int iavf_process_config(struct iavf_adapter *adapter) 3602 { 3603 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3604 int i, num_req_queues = adapter->num_req_queues; 3605 struct net_device *netdev = adapter->netdev; 3606 struct iavf_vsi *vsi = &adapter->vsi; 3607 netdev_features_t hw_enc_features; 3608 netdev_features_t hw_features; 3609 3610 /* got VF config message back from PF, now we can parse it */ 3611 for (i = 0; i < vfres->num_vsis; i++) { 3612 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3613 adapter->vsi_res = &vfres->vsi_res[i]; 3614 } 3615 if (!adapter->vsi_res) { 3616 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3617 return -ENODEV; 3618 } 3619 3620 if (num_req_queues && 3621 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3622 /* Problem. The PF gave us fewer queues than what we had 3623 * negotiated in our request. Need a reset to see if we can't 3624 * get back to a working state. 3625 */ 3626 dev_err(&adapter->pdev->dev, 3627 "Requested %d queues, but PF only gave us %d.\n", 3628 num_req_queues, 3629 adapter->vsi_res->num_queue_pairs); 3630 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3631 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3632 iavf_schedule_reset(adapter); 3633 return -ENODEV; 3634 } 3635 adapter->num_req_queues = 0; 3636 3637 hw_enc_features = NETIF_F_SG | 3638 NETIF_F_IP_CSUM | 3639 NETIF_F_IPV6_CSUM | 3640 NETIF_F_HIGHDMA | 3641 NETIF_F_SOFT_FEATURES | 3642 NETIF_F_TSO | 3643 NETIF_F_TSO_ECN | 3644 NETIF_F_TSO6 | 3645 NETIF_F_SCTP_CRC | 3646 NETIF_F_RXHASH | 3647 NETIF_F_RXCSUM | 3648 0; 3649 3650 /* advertise to stack only if offloads for encapsulated packets is 3651 * supported 3652 */ 3653 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3654 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3655 NETIF_F_GSO_GRE | 3656 NETIF_F_GSO_GRE_CSUM | 3657 NETIF_F_GSO_IPXIP4 | 3658 NETIF_F_GSO_IPXIP6 | 3659 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3660 NETIF_F_GSO_PARTIAL | 3661 0; 3662 3663 if (!(vfres->vf_cap_flags & 3664 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3665 netdev->gso_partial_features |= 3666 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3667 3668 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3669 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3670 netdev->hw_enc_features |= hw_enc_features; 3671 } 3672 /* record features VLANs can make use of */ 3673 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3674 3675 /* Write features and hw_features separately to avoid polluting 3676 * with, or dropping, features that are set when we registered. 3677 */ 3678 hw_features = hw_enc_features; 3679 3680 /* Enable VLAN features if supported */ 3681 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3682 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3683 NETIF_F_HW_VLAN_CTAG_RX); 3684 /* Enable cloud filter if ADQ is supported */ 3685 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3686 hw_features |= NETIF_F_HW_TC; 3687 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3688 hw_features |= NETIF_F_GSO_UDP_L4; 3689 3690 netdev->hw_features |= hw_features; 3691 3692 netdev->features |= hw_features; 3693 3694 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3695 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3696 3697 netdev->priv_flags |= IFF_UNICAST_FLT; 3698 3699 /* Do not turn on offloads when they are requested to be turned off. 3700 * TSO needs minimum 576 bytes to work correctly. 
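 * As a consequence, TSO and TSO6 are also dropped when the MTU is below
 * 576 bytes.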
3701 */ 3702 if (netdev->wanted_features) { 3703 if (!(netdev->wanted_features & NETIF_F_TSO) || 3704 netdev->mtu < 576) 3705 netdev->features &= ~NETIF_F_TSO; 3706 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3707 netdev->mtu < 576) 3708 netdev->features &= ~NETIF_F_TSO6; 3709 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3710 netdev->features &= ~NETIF_F_TSO_ECN; 3711 if (!(netdev->wanted_features & NETIF_F_GRO)) 3712 netdev->features &= ~NETIF_F_GRO; 3713 if (!(netdev->wanted_features & NETIF_F_GSO)) 3714 netdev->features &= ~NETIF_F_GSO; 3715 } 3716 3717 adapter->vsi.id = adapter->vsi_res->vsi_id; 3718 3719 adapter->vsi.back = adapter; 3720 adapter->vsi.base_vector = 1; 3721 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3722 vsi->netdev = adapter->netdev; 3723 vsi->qs_handle = adapter->vsi_res->qset_handle; 3724 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3725 adapter->rss_key_size = vfres->rss_key_size; 3726 adapter->rss_lut_size = vfres->rss_lut_size; 3727 } else { 3728 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3729 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3730 } 3731 3732 return 0; 3733 } 3734 3735 /** 3736 * iavf_shutdown - Shutdown the device in preparation for a reboot 3737 * @pdev: pci device structure 3738 **/ 3739 static void iavf_shutdown(struct pci_dev *pdev) 3740 { 3741 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 3742 struct net_device *netdev = adapter->netdev; 3743 3744 netif_device_detach(netdev); 3745 3746 if (netif_running(netdev)) 3747 iavf_close(netdev); 3748 3749 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 3750 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 3751 /* Prevent the watchdog from running. */ 3752 iavf_change_state(adapter, __IAVF_REMOVE); 3753 adapter->aq_required = 0; 3754 mutex_unlock(&adapter->crit_lock); 3755 3756 #ifdef CONFIG_PM 3757 pci_save_state(pdev); 3758 3759 #endif 3760 pci_disable_device(pdev); 3761 } 3762 3763 /** 3764 * iavf_probe - Device Initialization Routine 3765 * @pdev: PCI device information struct 3766 * @ent: entry in iavf_pci_tbl 3767 * 3768 * Returns 0 on success, negative on failure 3769 * 3770 * iavf_probe initializes an adapter identified by a pci_dev structure. 3771 * The OS initialization, configuring of the adapter private structure, 3772 * and a hardware reset occur. 
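 * Most of the initialization is deferred to the watchdog task, which walks
 * the adapter through the __IAVF_STARTUP, __IAVF_INIT_VERSION_CHECK and
 * __IAVF_INIT_GET_RESOURCES states once the PF starts responding.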
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	iavf_change_state(adapter, __IAVF_STARTUP);

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&adapter->crit_lock);
	mutex_init(&adapter->client_lock);
	mutex_init(&adapter->remove_lock);
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	mutex_unlock(&adapter->crit_lock);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter;
	int err;

	adapter = iavf_pdev_to_adapter(pdev);

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(adapter->netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	enum iavf_state_t prev_state = adapter->last_state;
	struct net_device *netdev = adapter->netdev;
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	mutex_lock(&adapter->remove_lock);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once.
	 */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);

	/* Shut down all the garbage mashers on the detention level */
	iavf_change_state(adapter, __IAVF_REMOVE);
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);

	/* In case we enter iavf_remove from erroneous state, free traffic irqs
	 * here, so as to not cause a kernel crash, when calling
	 * iavf_reset_interrupt_capability.
	 */
	if ((adapter->last_state == __IAVF_RESETTING &&
	     prev_state != __IAVF_DOWN) ||
	    (adapter->last_state == __IAVF_RUNNING &&
	     !(netdev->flags & IFF_UP)))
		iavf_free_traffic_irqs(adapter);

	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	mutex_destroy(&adapter->crit_lock);
	mutex_unlock(&adapter->remove_lock);
	mutex_destroy(&adapter->remove_lock);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	if (ret)
		/* don't leak the workqueue if driver registration fails */
		destroy_workqueue(iavf_wq);

	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */