1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static void iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 static const char iavf_copyright[] = 25 "Copyright (c) 2013 - 2018 Intel Corporation."; 26 27 /* iavf_pci_tbl - PCI Device ID Table 28 * 29 * Wildcard entries (PCI_ANY_ID) should come last 30 * Last entry must be all 0s 31 * 32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 33 * Class, Class Mask, private data (not used) } 34 */ 35 static const struct pci_device_id iavf_pci_tbl[] = { 36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 40 /* required last entry */ 41 {0, } 42 }; 43 44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 45 46 MODULE_ALIAS("i40evf"); 47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 49 MODULE_LICENSE("GPL v2"); 50 51 static const struct net_device_ops iavf_netdev_ops; 52 struct workqueue_struct *iavf_wq; 53 54 /** 55 * iavf_pdev_to_adapter - go from pci_dev to adapter 56 * @pdev: pci_dev pointer 57 */ 58 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) 59 { 60 return netdev_priv(pci_get_drvdata(pdev)); 61 } 62 63 /** 64 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 65 * @hw: pointer to the HW structure 66 * @mem: ptr to mem struct to fill out 67 * @size: size of memory requested 68 * @alignment: what to align the allocation to 69 **/ 70 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 71 struct iavf_dma_mem *mem, 72 u64 size, u32 alignment) 73 { 74 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 75 76 if (!mem) 77 return IAVF_ERR_PARAM; 78 79 mem->size = ALIGN(size, alignment); 80 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 81 (dma_addr_t *)&mem->pa, GFP_KERNEL); 82 if (mem->va) 83 return 0; 84 else 85 return IAVF_ERR_NO_MEMORY; 86 } 87 88 /** 89 * iavf_free_dma_mem_d - OS specific memory free for shared code 90 * @hw: pointer to the HW structure 91 * @mem: ptr to mem struct to free 92 **/ 93 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 94 struct iavf_dma_mem *mem) 95 { 96 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 97 98 if (!mem || !mem->va) 99 return IAVF_ERR_PARAM; 100 dma_free_coherent(&adapter->pdev->dev, mem->size, 101 mem->va, (dma_addr_t)mem->pa); 102 return 0; 103 } 104 105 /** 106 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 107 * @hw: pointer to the HW structure 108 * @mem: ptr to mem struct to fill out 109 * @size: size of memory requested 110 **/ 111 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, 112 struct iavf_virt_mem *mem, u32 size) 113 { 114 if (!mem) 115 return IAVF_ERR_PARAM; 116 117 mem->size = size; 118 mem->va = kzalloc(size, GFP_KERNEL); 119 120 if (mem->va) 121 return 0; 122 else 123 return IAVF_ERR_NO_MEMORY; 124 } 125 126 /** 127 * iavf_free_virt_mem_d - OS specific memory free for shared code 128 * @hw: pointer to the HW structure 129 * @mem: ptr to mem struct to free 130 **/ 131 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 132 struct iavf_virt_mem *mem) 133 { 134 if (!mem) 135 return IAVF_ERR_PARAM; 136 137 /* it's ok to kfree a NULL pointer */ 138 kfree(mem->va); 139 140 return 0; 141 } 142 143 /** 144 * iavf_lock_timeout - try to lock mutex but give up after timeout 145 * @lock: mutex that should be locked 146 * @msecs: timeout in msecs 147 * 148 * Returns 0 on success, negative on failure 149 **/ 150 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) 151 { 152 unsigned int wait, delay = 10; 153 154 for (wait = 0; wait < msecs; wait += delay) { 155 if (mutex_trylock(lock)) 156 return 0; 157 158 msleep(delay); 159 } 160 161 return -1; 162 } 163 164 /** 165 * iavf_schedule_reset - Set the flags and schedule a reset event 166 * @adapter: board private structure 167 **/ 168 void iavf_schedule_reset(struct iavf_adapter *adapter) 169 { 170 if (!(adapter->flags & 171 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 172 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 173 queue_work(iavf_wq, &adapter->reset_task); 174 } 175 } 176 177 /** 178 * iavf_schedule_request_stats - Set the flags and schedule statistics request 179 * @adapter: board private structure 180 * 181 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly 182 * request and refresh ethtool stats 183 **/ 184 void iavf_schedule_request_stats(struct iavf_adapter *adapter) 185 { 186 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; 187 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 188 } 189 190 /** 191 * iavf_tx_timeout - Respond to a Tx Hang 192 * @netdev: network interface device structure 193 * @txqueue: queue number that is timing out 194 **/ 195 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 196 { 197 struct iavf_adapter *adapter = netdev_priv(netdev); 198 199 adapter->tx_timeout_count++; 200 iavf_schedule_reset(adapter); 201 } 202 203 /** 204 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 205 * @adapter: board private structure 206 **/ 207 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 208 { 209 struct iavf_hw *hw = &adapter->hw; 210 211 if (!adapter->msix_entries) 212 return; 213 214 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 215 216 iavf_flush(hw); 217 218 synchronize_irq(adapter->msix_entries[0].vector); 219 } 220 221 /** 222 * iavf_misc_irq_enable - Enable default interrupt generation settings 223 * @adapter: board private structure 224 **/ 225 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 226 { 227 struct iavf_hw *hw = &adapter->hw; 228 229 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 230 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 231 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 232 233 iavf_flush(hw); 234 } 235 236 /** 237 * iavf_irq_disable - Mask off interrupt generation on the NIC 238 * @adapter: board private structure 239 **/ 240 static void iavf_irq_disable(struct iavf_adapter *adapter) 241 { 242 int i; 243 struct iavf_hw *hw = &adapter->hw; 244 245 if (!adapter->msix_entries) 246 return; 247 248 for (i = 1; i < adapter->num_msix_vectors; i++) { 249 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 250 synchronize_irq(adapter->msix_entries[i].vector); 251 } 252 iavf_flush(hw); 253 } 254 255 /** 256 * iavf_irq_enable_queues - Enable interrupt for specified queues 257 * @adapter: board private structure 258 * @mask: bitmap of queues to enable 259 **/ 260 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 261 { 262 struct iavf_hw *hw = &adapter->hw; 263 int i; 264 265 for (i = 1; i < adapter->num_msix_vectors; i++) { 266 if (mask & BIT(i - 1)) { 267 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 268 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 269 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 270 } 271 } 272 } 273 274 /** 275 * iavf_irq_enable - Enable default interrupt generation settings 276 * @adapter: board private structure 277 * @flush: boolean value whether to run rd32() 278 **/ 279 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 280 { 281 struct iavf_hw *hw = &adapter->hw; 282 283 iavf_misc_irq_enable(adapter); 284 iavf_irq_enable_queues(adapter, ~0); 285 286 if (flush) 287 iavf_flush(hw); 288 } 289 290 /** 291 * iavf_msix_aq - Interrupt handler for vector 0 292 * @irq: interrupt number 293 * @data: pointer to netdev 294 **/ 295 static irqreturn_t iavf_msix_aq(int irq, void *data) 296 { 297 struct net_device *netdev = data; 298 struct iavf_adapter *adapter = netdev_priv(netdev); 299 struct iavf_hw *hw = &adapter->hw; 300 301 /* handle non-queue interrupts, these reads clear the registers */ 302 rd32(hw, IAVF_VFINT_ICR01); 303 rd32(hw, IAVF_VFINT_ICR0_ENA1); 304 305 /* schedule work on the private workqueue */ 306 queue_work(iavf_wq, &adapter->adminq_task); 307 308 return IRQ_HANDLED; 309 } 310 311 /** 312 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 313 * @irq: interrupt number 314 * @data: pointer to a q_vector 315 **/ 316 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 317 { 318 struct iavf_q_vector *q_vector = data; 319 320 if (!q_vector->tx.ring && !q_vector->rx.ring) 321 return IRQ_HANDLED; 322 323 napi_schedule_irqoff(&q_vector->napi); 324 325 return IRQ_HANDLED; 326 } 327 328 /** 329 * iavf_map_vector_to_rxq - associate irqs with rx queues 330 * @adapter: board private structure 331 * @v_idx: interrupt number 332 * @r_idx: queue number 333 **/ 334 static void 335 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 336 { 337 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 338 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 339 struct iavf_hw *hw = &adapter->hw; 340 341 rx_ring->q_vector = q_vector; 342 rx_ring->next = q_vector->rx.ring; 343 rx_ring->vsi = &adapter->vsi; 344 q_vector->rx.ring = rx_ring; 345 q_vector->rx.count++; 346 q_vector->rx.next_update = jiffies + 1; 347 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 348 q_vector->ring_mask |= BIT(r_idx); 349 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 350 q_vector->rx.current_itr >> 1); 351 q_vector->rx.current_itr = q_vector->rx.target_itr; 352 } 353 354 /** 355 * iavf_map_vector_to_txq - associate irqs with tx queues 356 * @adapter: board private structure 357 * @v_idx: interrupt number 358 * @t_idx: queue number 359 **/ 360 static void 361 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 362 { 363 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 364 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 365 struct iavf_hw *hw = &adapter->hw; 366 367 tx_ring->q_vector = q_vector; 368 tx_ring->next = q_vector->tx.ring; 369 tx_ring->vsi = &adapter->vsi; 370 q_vector->tx.ring = tx_ring; 371 q_vector->tx.count++; 372 q_vector->tx.next_update = jiffies + 1; 373 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 374 q_vector->num_ringpairs++; 375 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 376 q_vector->tx.target_itr >> 1); 377 q_vector->tx.current_itr = q_vector->tx.target_itr; 378 } 379 380 /** 381 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 382 * @adapter: board private structure to initialize 383 * 384 * This function maps descriptor rings to the queue-specific vectors 385 * we were allotted through the MSI-X enabling code. Ideally, we'd have 386 * one vector per ring/queue, but on a constrained vector budget, we 387 * group the rings as "efficiently" as possible. You would add new 388 * mapping configurations in here. 389 **/ 390 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 391 { 392 int rings_remaining = adapter->num_active_queues; 393 int ridx = 0, vidx = 0; 394 int q_vectors; 395 396 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 397 398 for (; ridx < rings_remaining; ridx++) { 399 iavf_map_vector_to_rxq(adapter, vidx, ridx); 400 iavf_map_vector_to_txq(adapter, vidx, ridx); 401 402 /* In the case where we have more queues than vectors, continue 403 * round-robin on vectors until all queues are mapped. 404 */ 405 if (++vidx >= q_vectors) 406 vidx = 0; 407 } 408 409 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 410 } 411 412 /** 413 * iavf_irq_affinity_notify - Callback for affinity changes 414 * @notify: context as to what irq was changed 415 * @mask: the new affinity mask 416 * 417 * This is a callback function used by the irq_set_affinity_notifier function 418 * so that we may register to receive changes to the irq affinity masks. 419 **/ 420 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 421 const cpumask_t *mask) 422 { 423 struct iavf_q_vector *q_vector = 424 container_of(notify, struct iavf_q_vector, affinity_notify); 425 426 cpumask_copy(&q_vector->affinity_mask, mask); 427 } 428 429 /** 430 * iavf_irq_affinity_release - Callback for affinity notifier release 431 * @ref: internal core kernel usage 432 * 433 * This is a callback function used by the irq_set_affinity_notifier function 434 * to inform the current notification subscriber that they will no longer 435 * receive notifications. 436 **/ 437 static void iavf_irq_affinity_release(struct kref *ref) {} 438 439 /** 440 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 441 * @adapter: board private structure 442 * @basename: device basename 443 * 444 * Allocates MSI-X vectors for tx and rx handling, and requests 445 * interrupts from the kernel. 446 **/ 447 static int 448 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 449 { 450 unsigned int vector, q_vectors; 451 unsigned int rx_int_idx = 0, tx_int_idx = 0; 452 int irq_num, err; 453 int cpu; 454 455 iavf_irq_disable(adapter); 456 /* Decrement for Other and TCP Timer vectors */ 457 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 458 459 for (vector = 0; vector < q_vectors; vector++) { 460 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 461 462 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 463 464 if (q_vector->tx.ring && q_vector->rx.ring) { 465 snprintf(q_vector->name, sizeof(q_vector->name), 466 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 467 tx_int_idx++; 468 } else if (q_vector->rx.ring) { 469 snprintf(q_vector->name, sizeof(q_vector->name), 470 "iavf-%s-rx-%d", basename, rx_int_idx++); 471 } else if (q_vector->tx.ring) { 472 snprintf(q_vector->name, sizeof(q_vector->name), 473 "iavf-%s-tx-%d", basename, tx_int_idx++); 474 } else { 475 /* skip this unused q_vector */ 476 continue; 477 } 478 err = request_irq(irq_num, 479 iavf_msix_clean_rings, 480 0, 481 q_vector->name, 482 q_vector); 483 if (err) { 484 dev_info(&adapter->pdev->dev, 485 "Request_irq failed, error: %d\n", err); 486 goto free_queue_irqs; 487 } 488 /* register for affinity change notifications */ 489 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 490 q_vector->affinity_notify.release = 491 iavf_irq_affinity_release; 492 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 493 /* Spread the IRQ affinity hints across online CPUs. Note that 494 * get_cpu_mask returns a mask with a permanent lifetime so 495 * it's safe to use as a hint for irq_set_affinity_hint. 496 */ 497 cpu = cpumask_local_spread(q_vector->v_idx, -1); 498 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 499 } 500 501 return 0; 502 503 free_queue_irqs: 504 while (vector) { 505 vector--; 506 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 507 irq_set_affinity_notifier(irq_num, NULL); 508 irq_set_affinity_hint(irq_num, NULL); 509 free_irq(irq_num, &adapter->q_vectors[vector]); 510 } 511 return err; 512 } 513 514 /** 515 * iavf_request_misc_irq - Initialize MSI-X interrupts 516 * @adapter: board private structure 517 * 518 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 519 * vector is only for the admin queue, and stays active even when the netdev 520 * is closed. 521 **/ 522 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 523 { 524 struct net_device *netdev = adapter->netdev; 525 int err; 526 527 snprintf(adapter->misc_vector_name, 528 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 529 dev_name(&adapter->pdev->dev)); 530 err = request_irq(adapter->msix_entries[0].vector, 531 &iavf_msix_aq, 0, 532 adapter->misc_vector_name, netdev); 533 if (err) { 534 dev_err(&adapter->pdev->dev, 535 "request_irq for %s failed: %d\n", 536 adapter->misc_vector_name, err); 537 free_irq(adapter->msix_entries[0].vector, netdev); 538 } 539 return err; 540 } 541 542 /** 543 * iavf_free_traffic_irqs - Free MSI-X interrupts 544 * @adapter: board private structure 545 * 546 * Frees all MSI-X vectors other than 0. 547 **/ 548 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 549 { 550 int vector, irq_num, q_vectors; 551 552 if (!adapter->msix_entries) 553 return; 554 555 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 556 557 for (vector = 0; vector < q_vectors; vector++) { 558 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 559 irq_set_affinity_notifier(irq_num, NULL); 560 irq_set_affinity_hint(irq_num, NULL); 561 free_irq(irq_num, &adapter->q_vectors[vector]); 562 } 563 } 564 565 /** 566 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 567 * @adapter: board private structure 568 * 569 * Frees MSI-X vector 0. 570 **/ 571 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 572 { 573 struct net_device *netdev = adapter->netdev; 574 575 if (!adapter->msix_entries) 576 return; 577 578 free_irq(adapter->msix_entries[0].vector, netdev); 579 } 580 581 /** 582 * iavf_configure_tx - Configure Transmit Unit after Reset 583 * @adapter: board private structure 584 * 585 * Configure the Tx unit of the MAC after a reset. 586 **/ 587 static void iavf_configure_tx(struct iavf_adapter *adapter) 588 { 589 struct iavf_hw *hw = &adapter->hw; 590 int i; 591 592 for (i = 0; i < adapter->num_active_queues; i++) 593 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 594 } 595 596 /** 597 * iavf_configure_rx - Configure Receive Unit after Reset 598 * @adapter: board private structure 599 * 600 * Configure the Rx unit of the MAC after a reset. 601 **/ 602 static void iavf_configure_rx(struct iavf_adapter *adapter) 603 { 604 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 605 struct iavf_hw *hw = &adapter->hw; 606 int i; 607 608 /* Legacy Rx will always default to a 2048 buffer size. */ 609 #if (PAGE_SIZE < 8192) 610 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 611 struct net_device *netdev = adapter->netdev; 612 613 /* For jumbo frames on systems with 4K pages we have to use 614 * an order 1 page, so we might as well increase the size 615 * of our Rx buffer to make better use of the available space 616 */ 617 rx_buf_len = IAVF_RXBUFFER_3072; 618 619 /* We use a 1536 buffer size for configurations with 620 * standard Ethernet mtu. On x86 this gives us enough room 621 * for shared info and 192 bytes of padding. 622 */ 623 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 624 (netdev->mtu <= ETH_DATA_LEN)) 625 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 626 } 627 #endif 628 629 for (i = 0; i < adapter->num_active_queues; i++) { 630 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 631 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 632 633 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 634 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 635 else 636 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 637 } 638 } 639 640 /** 641 * iavf_find_vlan - Search filter list for specific vlan filter 642 * @adapter: board private structure 643 * @vlan: vlan tag 644 * 645 * Returns ptr to the filter object or NULL. Must be called while holding the 646 * mac_vlan_list_lock. 647 **/ 648 static struct 649 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 650 { 651 struct iavf_vlan_filter *f; 652 653 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 654 if (vlan == f->vlan) 655 return f; 656 } 657 return NULL; 658 } 659 660 /** 661 * iavf_add_vlan - Add a vlan filter to the list 662 * @adapter: board private structure 663 * @vlan: VLAN tag 664 * 665 * Returns ptr to the filter object or NULL when no memory available. 666 **/ 667 static struct 668 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 669 { 670 struct iavf_vlan_filter *f = NULL; 671 672 spin_lock_bh(&adapter->mac_vlan_list_lock); 673 674 f = iavf_find_vlan(adapter, vlan); 675 if (!f) { 676 f = kzalloc(sizeof(*f), GFP_ATOMIC); 677 if (!f) 678 goto clearout; 679 680 f->vlan = vlan; 681 682 list_add_tail(&f->list, &adapter->vlan_filter_list); 683 f->add = true; 684 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 685 } 686 687 clearout: 688 spin_unlock_bh(&adapter->mac_vlan_list_lock); 689 return f; 690 } 691 692 /** 693 * iavf_del_vlan - Remove a vlan filter from the list 694 * @adapter: board private structure 695 * @vlan: VLAN tag 696 **/ 697 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 698 { 699 struct iavf_vlan_filter *f; 700 701 spin_lock_bh(&adapter->mac_vlan_list_lock); 702 703 f = iavf_find_vlan(adapter, vlan); 704 if (f) { 705 f->remove = true; 706 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 707 } 708 709 spin_unlock_bh(&adapter->mac_vlan_list_lock); 710 } 711 712 /** 713 * iavf_restore_filters 714 * @adapter: board private structure 715 * 716 * Restore existing non MAC filters when VF netdev comes back up 717 **/ 718 static void iavf_restore_filters(struct iavf_adapter *adapter) 719 { 720 u16 vid; 721 722 /* re-add all VLAN filters */ 723 for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) 724 iavf_add_vlan(adapter, vid); 725 } 726 727 /** 728 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 729 * @netdev: network device struct 730 * @proto: unused protocol data 731 * @vid: VLAN tag 732 **/ 733 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 734 __always_unused __be16 proto, u16 vid) 735 { 736 struct iavf_adapter *adapter = netdev_priv(netdev); 737 738 if (!VLAN_ALLOWED(adapter)) 739 return -EIO; 740 741 if (iavf_add_vlan(adapter, vid) == NULL) 742 return -ENOMEM; 743 744 set_bit(vid, adapter->vsi.active_vlans); 745 return 0; 746 } 747 748 /** 749 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 750 * @netdev: network device struct 751 * @proto: unused protocol data 752 * @vid: VLAN tag 753 **/ 754 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 755 __always_unused __be16 proto, u16 vid) 756 { 757 struct iavf_adapter *adapter = netdev_priv(netdev); 758 759 iavf_del_vlan(adapter, vid); 760 clear_bit(vid, adapter->vsi.active_vlans); 761 762 return 0; 763 } 764 765 /** 766 * iavf_find_filter - Search filter list for specific mac filter 767 * @adapter: board private structure 768 * @macaddr: the MAC address 769 * 770 * Returns ptr to the filter object or NULL. Must be called while holding the 771 * mac_vlan_list_lock. 772 **/ 773 static struct 774 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 775 const u8 *macaddr) 776 { 777 struct iavf_mac_filter *f; 778 779 if (!macaddr) 780 return NULL; 781 782 list_for_each_entry(f, &adapter->mac_filter_list, list) { 783 if (ether_addr_equal(macaddr, f->macaddr)) 784 return f; 785 } 786 return NULL; 787 } 788 789 /** 790 * iavf_add_filter - Add a mac filter to the filter list 791 * @adapter: board private structure 792 * @macaddr: the MAC address 793 * 794 * Returns ptr to the filter object or NULL when no memory available. 795 **/ 796 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 797 const u8 *macaddr) 798 { 799 struct iavf_mac_filter *f; 800 801 if (!macaddr) 802 return NULL; 803 804 f = iavf_find_filter(adapter, macaddr); 805 if (!f) { 806 f = kzalloc(sizeof(*f), GFP_ATOMIC); 807 if (!f) 808 return f; 809 810 ether_addr_copy(f->macaddr, macaddr); 811 812 list_add_tail(&f->list, &adapter->mac_filter_list); 813 f->add = true; 814 f->is_new_mac = true; 815 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 816 } else { 817 f->remove = false; 818 } 819 820 return f; 821 } 822 823 /** 824 * iavf_set_mac - NDO callback to set port mac address 825 * @netdev: network interface device structure 826 * @p: pointer to an address structure 827 * 828 * Returns 0 on success, negative on failure 829 **/ 830 static int iavf_set_mac(struct net_device *netdev, void *p) 831 { 832 struct iavf_adapter *adapter = netdev_priv(netdev); 833 struct iavf_hw *hw = &adapter->hw; 834 struct iavf_mac_filter *f; 835 struct sockaddr *addr = p; 836 837 if (!is_valid_ether_addr(addr->sa_data)) 838 return -EADDRNOTAVAIL; 839 840 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 841 return 0; 842 843 spin_lock_bh(&adapter->mac_vlan_list_lock); 844 845 f = iavf_find_filter(adapter, hw->mac.addr); 846 if (f) { 847 f->remove = true; 848 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 849 } 850 851 f = iavf_add_filter(adapter, addr->sa_data); 852 853 spin_unlock_bh(&adapter->mac_vlan_list_lock); 854 855 if (f) { 856 ether_addr_copy(hw->mac.addr, addr->sa_data); 857 } 858 859 return (f == NULL) ? -ENOMEM : 0; 860 } 861 862 /** 863 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 864 * @netdev: the netdevice 865 * @addr: address to add 866 * 867 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 868 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 869 */ 870 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 871 { 872 struct iavf_adapter *adapter = netdev_priv(netdev); 873 874 if (iavf_add_filter(adapter, addr)) 875 return 0; 876 else 877 return -ENOMEM; 878 } 879 880 /** 881 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 882 * @netdev: the netdevice 883 * @addr: address to add 884 * 885 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 886 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 887 */ 888 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 889 { 890 struct iavf_adapter *adapter = netdev_priv(netdev); 891 struct iavf_mac_filter *f; 892 893 /* Under some circumstances, we might receive a request to delete 894 * our own device address from our uc list. Because we store the 895 * device address in the VSI's MAC/VLAN filter list, we need to ignore 896 * such requests and not delete our device address from this list. 897 */ 898 if (ether_addr_equal(addr, netdev->dev_addr)) 899 return 0; 900 901 f = iavf_find_filter(adapter, addr); 902 if (f) { 903 f->remove = true; 904 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 905 } 906 return 0; 907 } 908 909 /** 910 * iavf_set_rx_mode - NDO callback to set the netdev filters 911 * @netdev: network interface device structure 912 **/ 913 static void iavf_set_rx_mode(struct net_device *netdev) 914 { 915 struct iavf_adapter *adapter = netdev_priv(netdev); 916 917 spin_lock_bh(&adapter->mac_vlan_list_lock); 918 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 919 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 920 spin_unlock_bh(&adapter->mac_vlan_list_lock); 921 922 if (netdev->flags & IFF_PROMISC && 923 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 924 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 925 else if (!(netdev->flags & IFF_PROMISC) && 926 adapter->flags & IAVF_FLAG_PROMISC_ON) 927 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 928 929 if (netdev->flags & IFF_ALLMULTI && 930 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 931 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 932 else if (!(netdev->flags & IFF_ALLMULTI) && 933 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 934 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 935 } 936 937 /** 938 * iavf_napi_enable_all - enable NAPI on all queue vectors 939 * @adapter: board private structure 940 **/ 941 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 942 { 943 int q_idx; 944 struct iavf_q_vector *q_vector; 945 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 946 947 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 948 struct napi_struct *napi; 949 950 q_vector = &adapter->q_vectors[q_idx]; 951 napi = &q_vector->napi; 952 napi_enable(napi); 953 } 954 } 955 956 /** 957 * iavf_napi_disable_all - disable NAPI on all queue vectors 958 * @adapter: board private structure 959 **/ 960 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 961 { 962 int q_idx; 963 struct iavf_q_vector *q_vector; 964 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 965 966 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 967 q_vector = &adapter->q_vectors[q_idx]; 968 napi_disable(&q_vector->napi); 969 } 970 } 971 972 /** 973 * iavf_configure - set up transmit and receive data structures 974 * @adapter: board private structure 975 **/ 976 static void iavf_configure(struct iavf_adapter *adapter) 977 { 978 struct net_device *netdev = adapter->netdev; 979 int i; 980 981 iavf_set_rx_mode(netdev); 982 983 iavf_configure_tx(adapter); 984 iavf_configure_rx(adapter); 985 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 986 987 for (i = 0; i < adapter->num_active_queues; i++) { 988 struct iavf_ring *ring = &adapter->rx_rings[i]; 989 990 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 991 } 992 } 993 994 /** 995 * iavf_up_complete - Finish the last steps of bringing up a connection 996 * @adapter: board private structure 997 * 998 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 999 **/ 1000 static void iavf_up_complete(struct iavf_adapter *adapter) 1001 { 1002 iavf_change_state(adapter, __IAVF_RUNNING); 1003 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1004 1005 iavf_napi_enable_all(adapter); 1006 1007 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 1008 if (CLIENT_ENABLED(adapter)) 1009 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 1010 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1011 } 1012 1013 /** 1014 * iavf_down - Shutdown the connection processing 1015 * @adapter: board private structure 1016 * 1017 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 1018 **/ 1019 void iavf_down(struct iavf_adapter *adapter) 1020 { 1021 struct net_device *netdev = adapter->netdev; 1022 struct iavf_vlan_filter *vlf; 1023 struct iavf_cloud_filter *cf; 1024 struct iavf_fdir_fltr *fdir; 1025 struct iavf_mac_filter *f; 1026 struct iavf_adv_rss *rss; 1027 1028 if (adapter->state <= __IAVF_DOWN_PENDING) 1029 return; 1030 1031 netif_carrier_off(netdev); 1032 netif_tx_disable(netdev); 1033 adapter->link_up = false; 1034 iavf_napi_disable_all(adapter); 1035 iavf_irq_disable(adapter); 1036 1037 spin_lock_bh(&adapter->mac_vlan_list_lock); 1038 1039 /* clear the sync flag on all filters */ 1040 __dev_uc_unsync(adapter->netdev, NULL); 1041 __dev_mc_unsync(adapter->netdev, NULL); 1042 1043 /* remove all MAC filters */ 1044 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1045 f->remove = true; 1046 } 1047 1048 /* remove all VLAN filters */ 1049 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1050 vlf->remove = true; 1051 } 1052 1053 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1054 1055 /* remove all cloud filters */ 1056 spin_lock_bh(&adapter->cloud_filter_list_lock); 1057 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1058 cf->del = true; 1059 } 1060 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1061 1062 /* remove all Flow Director filters */ 1063 spin_lock_bh(&adapter->fdir_fltr_lock); 1064 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1065 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1066 } 1067 spin_unlock_bh(&adapter->fdir_fltr_lock); 1068 1069 /* remove all advance RSS configuration */ 1070 spin_lock_bh(&adapter->adv_rss_lock); 1071 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1072 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1073 spin_unlock_bh(&adapter->adv_rss_lock); 1074 1075 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1076 adapter->state != __IAVF_RESETTING) { 1077 /* cancel any current operation */ 1078 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1079 /* Schedule operations to close down the HW. Don't wait 1080 * here for this to complete. The watchdog is still running 1081 * and it will take care of this. 1082 */ 1083 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1084 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1085 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1086 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1087 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1088 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1089 } 1090 1091 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1092 } 1093 1094 /** 1095 * iavf_acquire_msix_vectors - Setup the MSIX capability 1096 * @adapter: board private structure 1097 * @vectors: number of vectors to request 1098 * 1099 * Work with the OS to set up the MSIX vectors needed. 1100 * 1101 * Returns 0 on success, negative on failure 1102 **/ 1103 static int 1104 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1105 { 1106 int err, vector_threshold; 1107 1108 /* We'll want at least 3 (vector_threshold): 1109 * 0) Other (Admin Queue and link, mostly) 1110 * 1) TxQ[0] Cleanup 1111 * 2) RxQ[0] Cleanup 1112 */ 1113 vector_threshold = MIN_MSIX_COUNT; 1114 1115 /* The more we get, the more we will assign to Tx/Rx Cleanup 1116 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1117 * Right now, we simply care about how many we'll get; we'll 1118 * set them up later while requesting irq's. 1119 */ 1120 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1121 vector_threshold, vectors); 1122 if (err < 0) { 1123 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1124 kfree(adapter->msix_entries); 1125 adapter->msix_entries = NULL; 1126 return err; 1127 } 1128 1129 /* Adjust for only the vectors we'll use, which is minimum 1130 * of max_msix_q_vectors + NONQ_VECS, or the number of 1131 * vectors we were allocated. 1132 */ 1133 adapter->num_msix_vectors = err; 1134 return 0; 1135 } 1136 1137 /** 1138 * iavf_free_queues - Free memory for all rings 1139 * @adapter: board private structure to initialize 1140 * 1141 * Free all of the memory associated with queue pairs. 1142 **/ 1143 static void iavf_free_queues(struct iavf_adapter *adapter) 1144 { 1145 if (!adapter->vsi_res) 1146 return; 1147 adapter->num_active_queues = 0; 1148 kfree(adapter->tx_rings); 1149 adapter->tx_rings = NULL; 1150 kfree(adapter->rx_rings); 1151 adapter->rx_rings = NULL; 1152 } 1153 1154 /** 1155 * iavf_alloc_queues - Allocate memory for all rings 1156 * @adapter: board private structure to initialize 1157 * 1158 * We allocate one ring per queue at run-time since we don't know the 1159 * number of queues at compile-time. The polling_netdev array is 1160 * intended for Multiqueue, but should work fine with a single queue. 1161 **/ 1162 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1163 { 1164 int i, num_active_queues; 1165 1166 /* If we're in reset reallocating queues we don't actually know yet for 1167 * certain the PF gave us the number of queues we asked for but we'll 1168 * assume it did. Once basic reset is finished we'll confirm once we 1169 * start negotiating config with PF. 1170 */ 1171 if (adapter->num_req_queues) 1172 num_active_queues = adapter->num_req_queues; 1173 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1174 adapter->num_tc) 1175 num_active_queues = adapter->ch_config.total_qps; 1176 else 1177 num_active_queues = min_t(int, 1178 adapter->vsi_res->num_queue_pairs, 1179 (int)(num_online_cpus())); 1180 1181 1182 adapter->tx_rings = kcalloc(num_active_queues, 1183 sizeof(struct iavf_ring), GFP_KERNEL); 1184 if (!adapter->tx_rings) 1185 goto err_out; 1186 adapter->rx_rings = kcalloc(num_active_queues, 1187 sizeof(struct iavf_ring), GFP_KERNEL); 1188 if (!adapter->rx_rings) 1189 goto err_out; 1190 1191 for (i = 0; i < num_active_queues; i++) { 1192 struct iavf_ring *tx_ring; 1193 struct iavf_ring *rx_ring; 1194 1195 tx_ring = &adapter->tx_rings[i]; 1196 1197 tx_ring->queue_index = i; 1198 tx_ring->netdev = adapter->netdev; 1199 tx_ring->dev = &adapter->pdev->dev; 1200 tx_ring->count = adapter->tx_desc_count; 1201 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1202 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1203 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1204 1205 rx_ring = &adapter->rx_rings[i]; 1206 rx_ring->queue_index = i; 1207 rx_ring->netdev = adapter->netdev; 1208 rx_ring->dev = &adapter->pdev->dev; 1209 rx_ring->count = adapter->rx_desc_count; 1210 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1211 } 1212 1213 adapter->num_active_queues = num_active_queues; 1214 1215 return 0; 1216 1217 err_out: 1218 iavf_free_queues(adapter); 1219 return -ENOMEM; 1220 } 1221 1222 /** 1223 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1224 * @adapter: board private structure to initialize 1225 * 1226 * Attempt to configure the interrupts using the best available 1227 * capabilities of the hardware and the kernel. 1228 **/ 1229 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1230 { 1231 int vector, v_budget; 1232 int pairs = 0; 1233 int err = 0; 1234 1235 if (!adapter->vsi_res) { 1236 err = -EIO; 1237 goto out; 1238 } 1239 pairs = adapter->num_active_queues; 1240 1241 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1242 * us much good if we have more vectors than CPUs. However, we already 1243 * limit the total number of queues by the number of CPUs so we do not 1244 * need any further limiting here. 1245 */ 1246 v_budget = min_t(int, pairs + NONQ_VECS, 1247 (int)adapter->vf_res->max_vectors); 1248 1249 adapter->msix_entries = kcalloc(v_budget, 1250 sizeof(struct msix_entry), GFP_KERNEL); 1251 if (!adapter->msix_entries) { 1252 err = -ENOMEM; 1253 goto out; 1254 } 1255 1256 for (vector = 0; vector < v_budget; vector++) 1257 adapter->msix_entries[vector].entry = vector; 1258 1259 err = iavf_acquire_msix_vectors(adapter, v_budget); 1260 1261 out: 1262 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1263 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1264 return err; 1265 } 1266 1267 /** 1268 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1269 * @adapter: board private structure 1270 * 1271 * Return 0 on success, negative on failure 1272 **/ 1273 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1274 { 1275 struct iavf_aqc_get_set_rss_key_data *rss_key = 1276 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1277 struct iavf_hw *hw = &adapter->hw; 1278 int ret = 0; 1279 1280 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1281 /* bail because we already have a command pending */ 1282 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1283 adapter->current_op); 1284 return -EBUSY; 1285 } 1286 1287 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1288 if (ret) { 1289 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1290 iavf_stat_str(hw, ret), 1291 iavf_aq_str(hw, hw->aq.asq_last_status)); 1292 return ret; 1293 1294 } 1295 1296 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1297 adapter->rss_lut, adapter->rss_lut_size); 1298 if (ret) { 1299 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1300 iavf_stat_str(hw, ret), 1301 iavf_aq_str(hw, hw->aq.asq_last_status)); 1302 } 1303 1304 return ret; 1305 1306 } 1307 1308 /** 1309 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1310 * @adapter: board private structure 1311 * 1312 * Returns 0 on success, negative on failure 1313 **/ 1314 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1315 { 1316 struct iavf_hw *hw = &adapter->hw; 1317 u32 *dw; 1318 u16 i; 1319 1320 dw = (u32 *)adapter->rss_key; 1321 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1322 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1323 1324 dw = (u32 *)adapter->rss_lut; 1325 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1326 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1327 1328 iavf_flush(hw); 1329 1330 return 0; 1331 } 1332 1333 /** 1334 * iavf_config_rss - Configure RSS keys and lut 1335 * @adapter: board private structure 1336 * 1337 * Returns 0 on success, negative on failure 1338 **/ 1339 int iavf_config_rss(struct iavf_adapter *adapter) 1340 { 1341 1342 if (RSS_PF(adapter)) { 1343 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1344 IAVF_FLAG_AQ_SET_RSS_KEY; 1345 return 0; 1346 } else if (RSS_AQ(adapter)) { 1347 return iavf_config_rss_aq(adapter); 1348 } else { 1349 return iavf_config_rss_reg(adapter); 1350 } 1351 } 1352 1353 /** 1354 * iavf_fill_rss_lut - Fill the lut with default values 1355 * @adapter: board private structure 1356 **/ 1357 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1358 { 1359 u16 i; 1360 1361 for (i = 0; i < adapter->rss_lut_size; i++) 1362 adapter->rss_lut[i] = i % adapter->num_active_queues; 1363 } 1364 1365 /** 1366 * iavf_init_rss - Prepare for RSS 1367 * @adapter: board private structure 1368 * 1369 * Return 0 on success, negative on failure 1370 **/ 1371 static int iavf_init_rss(struct iavf_adapter *adapter) 1372 { 1373 struct iavf_hw *hw = &adapter->hw; 1374 int ret; 1375 1376 if (!RSS_PF(adapter)) { 1377 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1378 if (adapter->vf_res->vf_cap_flags & 1379 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1380 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1381 else 1382 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1383 1384 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1385 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1386 } 1387 1388 iavf_fill_rss_lut(adapter); 1389 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1390 ret = iavf_config_rss(adapter); 1391 1392 return ret; 1393 } 1394 1395 /** 1396 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1397 * @adapter: board private structure to initialize 1398 * 1399 * We allocate one q_vector per queue interrupt. If allocation fails we 1400 * return -ENOMEM. 1401 **/ 1402 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1403 { 1404 int q_idx = 0, num_q_vectors; 1405 struct iavf_q_vector *q_vector; 1406 1407 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1408 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1409 GFP_KERNEL); 1410 if (!adapter->q_vectors) 1411 return -ENOMEM; 1412 1413 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1414 q_vector = &adapter->q_vectors[q_idx]; 1415 q_vector->adapter = adapter; 1416 q_vector->vsi = &adapter->vsi; 1417 q_vector->v_idx = q_idx; 1418 q_vector->reg_idx = q_idx; 1419 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1420 netif_napi_add(adapter->netdev, &q_vector->napi, 1421 iavf_napi_poll, NAPI_POLL_WEIGHT); 1422 } 1423 1424 return 0; 1425 } 1426 1427 /** 1428 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1429 * @adapter: board private structure to initialize 1430 * 1431 * This function frees the memory allocated to the q_vectors. In addition if 1432 * NAPI is enabled it will delete any references to the NAPI struct prior 1433 * to freeing the q_vector. 1434 **/ 1435 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1436 { 1437 int q_idx, num_q_vectors; 1438 int napi_vectors; 1439 1440 if (!adapter->q_vectors) 1441 return; 1442 1443 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1444 napi_vectors = adapter->num_active_queues; 1445 1446 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1447 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1448 1449 if (q_idx < napi_vectors) 1450 netif_napi_del(&q_vector->napi); 1451 } 1452 kfree(adapter->q_vectors); 1453 adapter->q_vectors = NULL; 1454 } 1455 1456 /** 1457 * iavf_reset_interrupt_capability - Reset MSIX setup 1458 * @adapter: board private structure 1459 * 1460 **/ 1461 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1462 { 1463 if (!adapter->msix_entries) 1464 return; 1465 1466 pci_disable_msix(adapter->pdev); 1467 kfree(adapter->msix_entries); 1468 adapter->msix_entries = NULL; 1469 } 1470 1471 /** 1472 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1473 * @adapter: board private structure to initialize 1474 * 1475 **/ 1476 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1477 { 1478 int err; 1479 1480 err = iavf_alloc_queues(adapter); 1481 if (err) { 1482 dev_err(&adapter->pdev->dev, 1483 "Unable to allocate memory for queues\n"); 1484 goto err_alloc_queues; 1485 } 1486 1487 rtnl_lock(); 1488 err = iavf_set_interrupt_capability(adapter); 1489 rtnl_unlock(); 1490 if (err) { 1491 dev_err(&adapter->pdev->dev, 1492 "Unable to setup interrupt capabilities\n"); 1493 goto err_set_interrupt; 1494 } 1495 1496 err = iavf_alloc_q_vectors(adapter); 1497 if (err) { 1498 dev_err(&adapter->pdev->dev, 1499 "Unable to allocate memory for queue vectors\n"); 1500 goto err_alloc_q_vectors; 1501 } 1502 1503 /* If we've made it so far while ADq flag being ON, then we haven't 1504 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1505 * resources have been allocated in the reset path. 1506 * Now we can truly claim that ADq is enabled. 1507 */ 1508 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1509 adapter->num_tc) 1510 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1511 adapter->num_tc); 1512 1513 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1514 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", 1515 adapter->num_active_queues); 1516 1517 return 0; 1518 err_alloc_q_vectors: 1519 iavf_reset_interrupt_capability(adapter); 1520 err_set_interrupt: 1521 iavf_free_queues(adapter); 1522 err_alloc_queues: 1523 return err; 1524 } 1525 1526 /** 1527 * iavf_free_rss - Free memory used by RSS structs 1528 * @adapter: board private structure 1529 **/ 1530 static void iavf_free_rss(struct iavf_adapter *adapter) 1531 { 1532 kfree(adapter->rss_key); 1533 adapter->rss_key = NULL; 1534 1535 kfree(adapter->rss_lut); 1536 adapter->rss_lut = NULL; 1537 } 1538 1539 /** 1540 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1541 * @adapter: board private structure 1542 * 1543 * Returns 0 on success, negative on failure 1544 **/ 1545 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1546 { 1547 struct net_device *netdev = adapter->netdev; 1548 int err; 1549 1550 if (netif_running(netdev)) 1551 iavf_free_traffic_irqs(adapter); 1552 iavf_free_misc_irq(adapter); 1553 iavf_reset_interrupt_capability(adapter); 1554 iavf_free_q_vectors(adapter); 1555 iavf_free_queues(adapter); 1556 1557 err = iavf_init_interrupt_scheme(adapter); 1558 if (err) 1559 goto err; 1560 1561 netif_tx_stop_all_queues(netdev); 1562 1563 err = iavf_request_misc_irq(adapter); 1564 if (err) 1565 goto err; 1566 1567 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1568 1569 iavf_map_rings_to_vectors(adapter); 1570 err: 1571 return err; 1572 } 1573 1574 /** 1575 * iavf_process_aq_command - process aq_required flags 1576 * and sends aq command 1577 * @adapter: pointer to iavf adapter structure 1578 * 1579 * Returns 0 on success 1580 * Returns error code if no command was sent 1581 * or error code if the command failed. 1582 **/ 1583 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1584 { 1585 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1586 return iavf_send_vf_config_msg(adapter); 1587 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1588 iavf_disable_queues(adapter); 1589 return 0; 1590 } 1591 1592 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1593 iavf_map_queues(adapter); 1594 return 0; 1595 } 1596 1597 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1598 iavf_add_ether_addrs(adapter); 1599 return 0; 1600 } 1601 1602 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1603 iavf_add_vlans(adapter); 1604 return 0; 1605 } 1606 1607 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1608 iavf_del_ether_addrs(adapter); 1609 return 0; 1610 } 1611 1612 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1613 iavf_del_vlans(adapter); 1614 return 0; 1615 } 1616 1617 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1618 iavf_enable_vlan_stripping(adapter); 1619 return 0; 1620 } 1621 1622 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1623 iavf_disable_vlan_stripping(adapter); 1624 return 0; 1625 } 1626 1627 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1628 iavf_configure_queues(adapter); 1629 return 0; 1630 } 1631 1632 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1633 iavf_enable_queues(adapter); 1634 return 0; 1635 } 1636 1637 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1638 /* This message goes straight to the firmware, not the 1639 * PF, so we don't have to set current_op as we will 1640 * not get a response through the ARQ. 1641 */ 1642 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 1643 return 0; 1644 } 1645 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 1646 iavf_get_hena(adapter); 1647 return 0; 1648 } 1649 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 1650 iavf_set_hena(adapter); 1651 return 0; 1652 } 1653 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 1654 iavf_set_rss_key(adapter); 1655 return 0; 1656 } 1657 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 1658 iavf_set_rss_lut(adapter); 1659 return 0; 1660 } 1661 1662 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { 1663 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | 1664 FLAG_VF_MULTICAST_PROMISC); 1665 return 0; 1666 } 1667 1668 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { 1669 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); 1670 return 0; 1671 } 1672 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || 1673 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { 1674 iavf_set_promiscuous(adapter, 0); 1675 return 0; 1676 } 1677 1678 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 1679 iavf_enable_channels(adapter); 1680 return 0; 1681 } 1682 1683 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 1684 iavf_disable_channels(adapter); 1685 return 0; 1686 } 1687 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1688 iavf_add_cloud_filter(adapter); 1689 return 0; 1690 } 1691 1692 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1693 iavf_del_cloud_filter(adapter); 1694 return 0; 1695 } 1696 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1697 iavf_del_cloud_filter(adapter); 1698 return 0; 1699 } 1700 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1701 iavf_add_cloud_filter(adapter); 1702 return 0; 1703 } 1704 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { 1705 iavf_add_fdir_filter(adapter); 1706 return IAVF_SUCCESS; 1707 } 1708 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { 1709 iavf_del_fdir_filter(adapter); 1710 return IAVF_SUCCESS; 1711 } 1712 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { 1713 iavf_add_adv_rss_cfg(adapter); 1714 return 0; 1715 } 1716 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { 1717 iavf_del_adv_rss_cfg(adapter); 1718 return 0; 1719 } 1720 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { 1721 iavf_request_stats(adapter); 1722 return 0; 1723 } 1724 1725 return -EAGAIN; 1726 } 1727 1728 /** 1729 * iavf_startup - first step of driver startup 1730 * @adapter: board private structure 1731 * 1732 * Function process __IAVF_STARTUP driver state. 1733 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1734 * when fails the state is changed to __IAVF_INIT_FAILED 1735 **/ 1736 static void iavf_startup(struct iavf_adapter *adapter) 1737 { 1738 struct pci_dev *pdev = adapter->pdev; 1739 struct iavf_hw *hw = &adapter->hw; 1740 int err; 1741 1742 WARN_ON(adapter->state != __IAVF_STARTUP); 1743 1744 /* driver loaded, probe complete */ 1745 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1746 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1747 err = iavf_set_mac_type(hw); 1748 if (err) { 1749 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1750 goto err; 1751 } 1752 1753 err = iavf_check_reset_complete(hw); 1754 if (err) { 1755 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1756 err); 1757 goto err; 1758 } 1759 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1760 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1761 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1762 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1763 1764 err = iavf_init_adminq(hw); 1765 if (err) { 1766 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1767 goto err; 1768 } 1769 err = iavf_send_api_ver(adapter); 1770 if (err) { 1771 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1772 iavf_shutdown_adminq(hw); 1773 goto err; 1774 } 1775 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 1776 return; 1777 err: 1778 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1779 } 1780 1781 /** 1782 * iavf_init_version_check - second step of driver startup 1783 * @adapter: board private structure 1784 * 1785 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1786 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1787 * when fails the state is changed to __IAVF_INIT_FAILED 1788 **/ 1789 static void iavf_init_version_check(struct iavf_adapter *adapter) 1790 { 1791 struct pci_dev *pdev = adapter->pdev; 1792 struct iavf_hw *hw = &adapter->hw; 1793 int err = -EAGAIN; 1794 1795 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1796 1797 if (!iavf_asq_done(hw)) { 1798 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1799 iavf_shutdown_adminq(hw); 1800 iavf_change_state(adapter, __IAVF_STARTUP); 1801 goto err; 1802 } 1803 1804 /* aq msg sent, awaiting reply */ 1805 err = iavf_verify_api_ver(adapter); 1806 if (err) { 1807 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1808 err = iavf_send_api_ver(adapter); 1809 else 1810 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1811 adapter->pf_version.major, 1812 adapter->pf_version.minor, 1813 VIRTCHNL_VERSION_MAJOR, 1814 VIRTCHNL_VERSION_MINOR); 1815 goto err; 1816 } 1817 err = iavf_send_vf_config_msg(adapter); 1818 if (err) { 1819 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1820 err); 1821 goto err; 1822 } 1823 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 1824 return; 1825 err: 1826 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1827 } 1828 1829 /** 1830 * iavf_init_get_resources - third step of driver startup 1831 * @adapter: board private structure 1832 * 1833 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1834 * finishes driver initialization procedure. 1835 * When success the state is changed to __IAVF_DOWN 1836 * when fails the state is changed to __IAVF_INIT_FAILED 1837 **/ 1838 static void iavf_init_get_resources(struct iavf_adapter *adapter) 1839 { 1840 struct net_device *netdev = adapter->netdev; 1841 struct pci_dev *pdev = adapter->pdev; 1842 struct iavf_hw *hw = &adapter->hw; 1843 int err; 1844 1845 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1846 /* aq msg sent, awaiting reply */ 1847 if (!adapter->vf_res) { 1848 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1849 GFP_KERNEL); 1850 if (!adapter->vf_res) { 1851 err = -ENOMEM; 1852 goto err; 1853 } 1854 } 1855 err = iavf_get_vf_config(adapter); 1856 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1857 err = iavf_send_vf_config_msg(adapter); 1858 goto err; 1859 } else if (err == IAVF_ERR_PARAM) { 1860 /* We only get ERR_PARAM if the device is in a very bad 1861 * state or if we've been disabled for previous bad 1862 * behavior. Either way, we're done now. 1863 */ 1864 iavf_shutdown_adminq(hw); 1865 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1866 return; 1867 } 1868 if (err) { 1869 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1870 goto err_alloc; 1871 } 1872 1873 err = iavf_process_config(adapter); 1874 if (err) 1875 goto err_alloc; 1876 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1877 1878 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1879 1880 netdev->netdev_ops = &iavf_netdev_ops; 1881 iavf_set_ethtool_ops(netdev); 1882 netdev->watchdog_timeo = 5 * HZ; 1883 1884 /* MTU range: 68 - 9710 */ 1885 netdev->min_mtu = ETH_MIN_MTU; 1886 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1887 1888 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1889 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1890 adapter->hw.mac.addr); 1891 eth_hw_addr_random(netdev); 1892 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1893 } else { 1894 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1895 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1896 } 1897 1898 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1899 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1900 err = iavf_init_interrupt_scheme(adapter); 1901 if (err) 1902 goto err_sw_init; 1903 iavf_map_rings_to_vectors(adapter); 1904 if (adapter->vf_res->vf_cap_flags & 1905 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1906 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1907 1908 err = iavf_request_misc_irq(adapter); 1909 if (err) 1910 goto err_sw_init; 1911 1912 netif_carrier_off(netdev); 1913 adapter->link_up = false; 1914 1915 /* set the semaphore to prevent any callbacks after device registration 1916 * up to time when state of driver will be set to __IAVF_DOWN 1917 */ 1918 rtnl_lock(); 1919 if (!adapter->netdev_registered) { 1920 err = register_netdevice(netdev); 1921 if (err) { 1922 rtnl_unlock(); 1923 goto err_register; 1924 } 1925 } 1926 1927 adapter->netdev_registered = true; 1928 1929 netif_tx_stop_all_queues(netdev); 1930 if (CLIENT_ALLOWED(adapter)) { 1931 err = iavf_lan_add_device(adapter); 1932 if (err) 1933 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1934 err); 1935 } 1936 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1937 if (netdev->features & NETIF_F_GRO) 1938 dev_info(&pdev->dev, "GRO is enabled\n"); 1939 1940 iavf_change_state(adapter, __IAVF_DOWN); 1941 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1942 rtnl_unlock(); 1943 1944 iavf_misc_irq_enable(adapter); 1945 wake_up(&adapter->down_waitqueue); 1946 1947 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1948 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1949 if (!adapter->rss_key || !adapter->rss_lut) { 1950 err = -ENOMEM; 1951 goto err_mem; 1952 } 1953 if (RSS_AQ(adapter)) 1954 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1955 else 1956 iavf_init_rss(adapter); 1957 1958 return; 1959 err_mem: 1960 iavf_free_rss(adapter); 1961 err_register: 1962 iavf_free_misc_irq(adapter); 1963 err_sw_init: 1964 iavf_reset_interrupt_capability(adapter); 1965 err_alloc: 1966 kfree(adapter->vf_res); 1967 adapter->vf_res = NULL; 1968 err: 1969 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1970 } 1971 1972 /** 1973 * iavf_watchdog_task - Periodic call-back task 1974 * @work: pointer to work_struct 1975 **/ 1976 static void iavf_watchdog_task(struct work_struct *work) 1977 { 1978 struct iavf_adapter *adapter = container_of(work, 1979 struct iavf_adapter, 1980 watchdog_task.work); 1981 struct iavf_hw *hw = &adapter->hw; 1982 u32 reg_val; 1983 1984 if (!mutex_trylock(&adapter->crit_lock)) 1985 goto restart_watchdog; 1986 1987 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1988 iavf_change_state(adapter, __IAVF_COMM_FAILED); 1989 1990 if (adapter->flags & IAVF_FLAG_RESET_NEEDED && 1991 adapter->state != __IAVF_RESETTING) { 1992 iavf_change_state(adapter, __IAVF_RESETTING); 1993 adapter->aq_required = 0; 1994 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1995 } 1996 1997 switch (adapter->state) { 1998 case __IAVF_STARTUP: 1999 iavf_startup(adapter); 2000 mutex_unlock(&adapter->crit_lock); 2001 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2002 msecs_to_jiffies(30)); 2003 return; 2004 case __IAVF_INIT_VERSION_CHECK: 2005 iavf_init_version_check(adapter); 2006 mutex_unlock(&adapter->crit_lock); 2007 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2008 msecs_to_jiffies(30)); 2009 return; 2010 case __IAVF_INIT_GET_RESOURCES: 2011 iavf_init_get_resources(adapter); 2012 mutex_unlock(&adapter->crit_lock); 2013 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2014 msecs_to_jiffies(1)); 2015 return; 2016 case __IAVF_INIT_FAILED: 2017 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 2018 dev_err(&adapter->pdev->dev, 2019 "Failed to communicate with PF; waiting before retry\n"); 2020 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2021 iavf_shutdown_adminq(hw); 2022 mutex_unlock(&adapter->crit_lock); 2023 queue_delayed_work(iavf_wq, 2024 &adapter->watchdog_task, (5 * HZ)); 2025 return; 2026 } 2027 /* Try again from failed step*/ 2028 iavf_change_state(adapter, adapter->last_state); 2029 mutex_unlock(&adapter->crit_lock); 2030 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 2031 return; 2032 case __IAVF_COMM_FAILED: 2033 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2034 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2035 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2036 reg_val == VIRTCHNL_VFR_COMPLETED) { 2037 /* A chance for redemption! */ 2038 dev_err(&adapter->pdev->dev, 2039 "Hardware came out of reset. Attempting reinit.\n"); 2040 /* When init task contacts the PF and 2041 * gets everything set up again, it'll restart the 2042 * watchdog for us. Down, boy. Sit. Stay. Woof. 2043 */ 2044 iavf_change_state(adapter, __IAVF_STARTUP); 2045 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2046 } 2047 adapter->aq_required = 0; 2048 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2049 queue_delayed_work(iavf_wq, 2050 &adapter->watchdog_task, 2051 msecs_to_jiffies(10)); 2052 return; 2053 case __IAVF_RESETTING: 2054 mutex_unlock(&adapter->crit_lock); 2055 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2056 return; 2057 case __IAVF_DOWN: 2058 case __IAVF_DOWN_PENDING: 2059 case __IAVF_TESTING: 2060 case __IAVF_RUNNING: 2061 if (adapter->current_op) { 2062 if (!iavf_asq_done(hw)) { 2063 dev_dbg(&adapter->pdev->dev, 2064 "Admin queue timeout\n"); 2065 iavf_send_api_ver(adapter); 2066 } 2067 } else { 2068 /* An error will be returned if no commands were 2069 * processed; use this opportunity to update stats 2070 */ 2071 if (iavf_process_aq_command(adapter) && 2072 adapter->state == __IAVF_RUNNING) 2073 iavf_request_stats(adapter); 2074 } 2075 if (adapter->state == __IAVF_RUNNING) 2076 iavf_detect_recover_hung(&adapter->vsi); 2077 break; 2078 case __IAVF_REMOVE: 2079 mutex_unlock(&adapter->crit_lock); 2080 return; 2081 default: 2082 return; 2083 } 2084 2085 /* check for hw reset */ 2086 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2087 if (!reg_val) { 2088 iavf_change_state(adapter, __IAVF_RESETTING); 2089 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2090 adapter->aq_required = 0; 2091 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2092 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2093 queue_work(iavf_wq, &adapter->reset_task); 2094 mutex_unlock(&adapter->crit_lock); 2095 queue_delayed_work(iavf_wq, 2096 &adapter->watchdog_task, HZ * 2); 2097 return; 2098 } 2099 2100 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2101 mutex_unlock(&adapter->crit_lock); 2102 restart_watchdog: 2103 queue_work(iavf_wq, &adapter->adminq_task); 2104 if (adapter->aq_required) 2105 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2106 msecs_to_jiffies(20)); 2107 else 2108 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2109 } 2110 2111 static void iavf_disable_vf(struct iavf_adapter *adapter) 2112 { 2113 struct iavf_mac_filter *f, *ftmp; 2114 struct iavf_vlan_filter *fv, *fvtmp; 2115 struct iavf_cloud_filter *cf, *cftmp; 2116 2117 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2118 2119 /* We don't use netif_running() because it may be true prior to 2120 * ndo_open() returning, so we can't assume it means all our open 2121 * tasks have finished, since we're not holding the rtnl_lock here. 2122 */ 2123 if (adapter->state == __IAVF_RUNNING) { 2124 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2125 netif_carrier_off(adapter->netdev); 2126 netif_tx_disable(adapter->netdev); 2127 adapter->link_up = false; 2128 iavf_napi_disable_all(adapter); 2129 iavf_irq_disable(adapter); 2130 iavf_free_traffic_irqs(adapter); 2131 iavf_free_all_tx_resources(adapter); 2132 iavf_free_all_rx_resources(adapter); 2133 } 2134 2135 spin_lock_bh(&adapter->mac_vlan_list_lock); 2136 2137 /* Delete all of the filters */ 2138 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2139 list_del(&f->list); 2140 kfree(f); 2141 } 2142 2143 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2144 list_del(&fv->list); 2145 kfree(fv); 2146 } 2147 2148 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2149 2150 spin_lock_bh(&adapter->cloud_filter_list_lock); 2151 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2152 list_del(&cf->list); 2153 kfree(cf); 2154 adapter->num_cloud_filters--; 2155 } 2156 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2157 2158 iavf_free_misc_irq(adapter); 2159 iavf_reset_interrupt_capability(adapter); 2160 iavf_free_q_vectors(adapter); 2161 iavf_free_queues(adapter); 2162 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2163 iavf_shutdown_adminq(&adapter->hw); 2164 adapter->netdev->flags &= ~IFF_UP; 2165 mutex_unlock(&adapter->crit_lock); 2166 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2167 iavf_change_state(adapter, __IAVF_DOWN); 2168 wake_up(&adapter->down_waitqueue); 2169 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2170 } 2171 2172 /** 2173 * iavf_reset_task - Call-back task to handle hardware reset 2174 * @work: pointer to work_struct 2175 * 2176 * During reset we need to shut down and reinitialize the admin queue 2177 * before we can use it to communicate with the PF again. We also clear 2178 * and reinit the rings because that context is lost as well. 2179 **/ 2180 static void iavf_reset_task(struct work_struct *work) 2181 { 2182 struct iavf_adapter *adapter = container_of(work, 2183 struct iavf_adapter, 2184 reset_task); 2185 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2186 struct net_device *netdev = adapter->netdev; 2187 struct iavf_hw *hw = &adapter->hw; 2188 struct iavf_mac_filter *f, *ftmp; 2189 struct iavf_cloud_filter *cf; 2190 u32 reg_val; 2191 int i = 0, err; 2192 bool running; 2193 2194 /* When device is being removed it doesn't make sense to run the reset 2195 * task, just return in such a case. 2196 */ 2197 if (mutex_is_locked(&adapter->remove_lock)) 2198 return; 2199 2200 if (iavf_lock_timeout(&adapter->crit_lock, 200)) { 2201 schedule_work(&adapter->reset_task); 2202 return; 2203 } 2204 while (!mutex_trylock(&adapter->client_lock)) 2205 usleep_range(500, 1000); 2206 if (CLIENT_ENABLED(adapter)) { 2207 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2208 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2209 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2210 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2211 cancel_delayed_work_sync(&adapter->client_task); 2212 iavf_notify_client_close(&adapter->vsi, true); 2213 } 2214 iavf_misc_irq_disable(adapter); 2215 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2216 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2217 /* Restart the AQ here. If we have been reset but didn't 2218 * detect it, or if the PF had to reinit, our AQ will be hosed. 2219 */ 2220 iavf_shutdown_adminq(hw); 2221 iavf_init_adminq(hw); 2222 iavf_request_reset(adapter); 2223 } 2224 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2225 2226 /* poll until we see the reset actually happen */ 2227 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2228 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2229 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2230 if (!reg_val) 2231 break; 2232 usleep_range(5000, 10000); 2233 } 2234 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2235 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2236 goto continue_reset; /* act like the reset happened */ 2237 } 2238 2239 /* wait until the reset is complete and the PF is responding to us */ 2240 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2241 /* sleep first to make sure a minimum wait time is met */ 2242 msleep(IAVF_RESET_WAIT_MS); 2243 2244 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2245 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2246 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2247 break; 2248 } 2249 2250 pci_set_master(adapter->pdev); 2251 pci_restore_msi_state(adapter->pdev); 2252 2253 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2254 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2255 reg_val); 2256 iavf_disable_vf(adapter); 2257 mutex_unlock(&adapter->client_lock); 2258 return; /* Do not attempt to reinit. It's dead, Jim. */ 2259 } 2260 2261 continue_reset: 2262 /* We don't use netif_running() because it may be true prior to 2263 * ndo_open() returning, so we can't assume it means all our open 2264 * tasks have finished, since we're not holding the rtnl_lock here. 2265 */ 2266 running = ((adapter->state == __IAVF_RUNNING) || 2267 (adapter->state == __IAVF_RESETTING)); 2268 2269 if (running) { 2270 netdev->flags &= ~IFF_UP; 2271 netif_carrier_off(netdev); 2272 netif_tx_stop_all_queues(netdev); 2273 adapter->link_up = false; 2274 iavf_napi_disable_all(adapter); 2275 } 2276 iavf_irq_disable(adapter); 2277 2278 iavf_change_state(adapter, __IAVF_RESETTING); 2279 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2280 2281 /* free the Tx/Rx rings and descriptors, might be better to just 2282 * re-use them sometime in the future 2283 */ 2284 iavf_free_all_rx_resources(adapter); 2285 iavf_free_all_tx_resources(adapter); 2286 2287 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2288 /* kill and reinit the admin queue */ 2289 iavf_shutdown_adminq(hw); 2290 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2291 err = iavf_init_adminq(hw); 2292 if (err) 2293 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2294 err); 2295 adapter->aq_required = 0; 2296 2297 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2298 err = iavf_reinit_interrupt_scheme(adapter); 2299 if (err) 2300 goto reset_err; 2301 } 2302 2303 if (RSS_AQ(adapter)) { 2304 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2305 } else { 2306 err = iavf_init_rss(adapter); 2307 if (err) 2308 goto reset_err; 2309 } 2310 2311 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2312 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2313 2314 spin_lock_bh(&adapter->mac_vlan_list_lock); 2315 2316 /* Delete filter for the current MAC address, it could have 2317 * been changed by the PF via administratively set MAC. 2318 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 2319 */ 2320 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2321 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2322 list_del(&f->list); 2323 kfree(f); 2324 } 2325 } 2326 /* re-add all MAC filters */ 2327 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2328 f->add = true; 2329 } 2330 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2331 2332 /* check if TCs are running and re-add all cloud filters */ 2333 spin_lock_bh(&adapter->cloud_filter_list_lock); 2334 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2335 adapter->num_tc) { 2336 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2337 cf->add = true; 2338 } 2339 } 2340 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2341 2342 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2343 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2344 iavf_misc_irq_enable(adapter); 2345 2346 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2347 2348 /* We were running when the reset started, so we need to restore some 2349 * state here. 2350 */ 2351 if (running) { 2352 /* allocate transmit descriptors */ 2353 err = iavf_setup_all_tx_resources(adapter); 2354 if (err) 2355 goto reset_err; 2356 2357 /* allocate receive descriptors */ 2358 err = iavf_setup_all_rx_resources(adapter); 2359 if (err) 2360 goto reset_err; 2361 2362 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2363 err = iavf_request_traffic_irqs(adapter, netdev->name); 2364 if (err) 2365 goto reset_err; 2366 2367 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2368 } 2369 2370 iavf_configure(adapter); 2371 2372 /* iavf_up_complete() will switch device back 2373 * to __IAVF_RUNNING 2374 */ 2375 iavf_up_complete(adapter); 2376 netdev->flags |= IFF_UP; 2377 iavf_irq_enable(adapter, true); 2378 } else { 2379 iavf_change_state(adapter, __IAVF_DOWN); 2380 wake_up(&adapter->down_waitqueue); 2381 } 2382 mutex_unlock(&adapter->client_lock); 2383 mutex_unlock(&adapter->crit_lock); 2384 2385 return; 2386 reset_err: 2387 mutex_unlock(&adapter->client_lock); 2388 mutex_unlock(&adapter->crit_lock); 2389 if (running) { 2390 iavf_change_state(adapter, __IAVF_RUNNING); 2391 netdev->flags |= IFF_UP; 2392 } 2393 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2394 iavf_close(netdev); 2395 } 2396 2397 /** 2398 * iavf_adminq_task - worker thread to clean the admin queue 2399 * @work: pointer to work_struct containing our data 2400 **/ 2401 static void iavf_adminq_task(struct work_struct *work) 2402 { 2403 struct iavf_adapter *adapter = 2404 container_of(work, struct iavf_adapter, adminq_task); 2405 struct iavf_hw *hw = &adapter->hw; 2406 struct iavf_arq_event_info event; 2407 enum virtchnl_ops v_op; 2408 enum iavf_status ret, v_ret; 2409 u32 val, oldval; 2410 u16 pending; 2411 2412 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2413 goto out; 2414 2415 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2416 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2417 if (!event.msg_buf) 2418 goto out; 2419 2420 if (iavf_lock_timeout(&adapter->crit_lock, 200)) 2421 goto freedom; 2422 do { 2423 ret = iavf_clean_arq_element(hw, &event, &pending); 2424 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2425 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2426 2427 if (ret || !v_op) 2428 break; /* No event to process or error cleaning ARQ */ 2429 2430 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2431 event.msg_len); 2432 if (pending != 0) 2433 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2434 } while (pending); 2435 mutex_unlock(&adapter->crit_lock); 2436 2437 if ((adapter->flags & 2438 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2439 adapter->state == __IAVF_RESETTING) 2440 goto freedom; 2441 2442 /* check for error indications */ 2443 val = rd32(hw, hw->aq.arq.len); 2444 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ 2445 goto freedom; 2446 oldval = val; 2447 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2448 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2449 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2450 } 2451 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2452 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2453 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2454 } 2455 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2456 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2457 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2458 } 2459 if (oldval != val) 2460 wr32(hw, hw->aq.arq.len, val); 2461 2462 val = rd32(hw, hw->aq.asq.len); 2463 oldval = val; 2464 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2465 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2466 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2467 } 2468 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2469 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2470 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2471 } 2472 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2473 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2474 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2475 } 2476 if (oldval != val) 2477 wr32(hw, hw->aq.asq.len, val); 2478 2479 freedom: 2480 kfree(event.msg_buf); 2481 out: 2482 /* re-enable Admin queue interrupt cause */ 2483 iavf_misc_irq_enable(adapter); 2484 } 2485 2486 /** 2487 * iavf_client_task - worker thread to perform client work 2488 * @work: pointer to work_struct containing our data 2489 * 2490 * This task handles client interactions. Because client calls can be 2491 * reentrant, we can't handle them in the watchdog. 2492 **/ 2493 static void iavf_client_task(struct work_struct *work) 2494 { 2495 struct iavf_adapter *adapter = 2496 container_of(work, struct iavf_adapter, client_task.work); 2497 2498 /* If we can't get the client bit, just give up. We'll be rescheduled 2499 * later. 2500 */ 2501 2502 if (!mutex_trylock(&adapter->client_lock)) 2503 return; 2504 2505 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2506 iavf_client_subtask(adapter); 2507 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2508 goto out; 2509 } 2510 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2511 iavf_notify_client_l2_params(&adapter->vsi); 2512 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2513 goto out; 2514 } 2515 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2516 iavf_notify_client_close(&adapter->vsi, false); 2517 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2518 goto out; 2519 } 2520 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2521 iavf_notify_client_open(&adapter->vsi); 2522 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2523 } 2524 out: 2525 mutex_unlock(&adapter->client_lock); 2526 } 2527 2528 /** 2529 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2530 * @adapter: board private structure 2531 * 2532 * Free all transmit software resources 2533 **/ 2534 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2535 { 2536 int i; 2537 2538 if (!adapter->tx_rings) 2539 return; 2540 2541 for (i = 0; i < adapter->num_active_queues; i++) 2542 if (adapter->tx_rings[i].desc) 2543 iavf_free_tx_resources(&adapter->tx_rings[i]); 2544 } 2545 2546 /** 2547 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2548 * @adapter: board private structure 2549 * 2550 * If this function returns with an error, then it's possible one or 2551 * more of the rings is populated (while the rest are not). It is the 2552 * callers duty to clean those orphaned rings. 2553 * 2554 * Return 0 on success, negative on failure 2555 **/ 2556 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2557 { 2558 int i, err = 0; 2559 2560 for (i = 0; i < adapter->num_active_queues; i++) { 2561 adapter->tx_rings[i].count = adapter->tx_desc_count; 2562 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2563 if (!err) 2564 continue; 2565 dev_err(&adapter->pdev->dev, 2566 "Allocation for Tx Queue %u failed\n", i); 2567 break; 2568 } 2569 2570 return err; 2571 } 2572 2573 /** 2574 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2575 * @adapter: board private structure 2576 * 2577 * If this function returns with an error, then it's possible one or 2578 * more of the rings is populated (while the rest are not). It is the 2579 * callers duty to clean those orphaned rings. 2580 * 2581 * Return 0 on success, negative on failure 2582 **/ 2583 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2584 { 2585 int i, err = 0; 2586 2587 for (i = 0; i < adapter->num_active_queues; i++) { 2588 adapter->rx_rings[i].count = adapter->rx_desc_count; 2589 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2590 if (!err) 2591 continue; 2592 dev_err(&adapter->pdev->dev, 2593 "Allocation for Rx Queue %u failed\n", i); 2594 break; 2595 } 2596 return err; 2597 } 2598 2599 /** 2600 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2601 * @adapter: board private structure 2602 * 2603 * Free all receive software resources 2604 **/ 2605 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2606 { 2607 int i; 2608 2609 if (!adapter->rx_rings) 2610 return; 2611 2612 for (i = 0; i < adapter->num_active_queues; i++) 2613 if (adapter->rx_rings[i].desc) 2614 iavf_free_rx_resources(&adapter->rx_rings[i]); 2615 } 2616 2617 /** 2618 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2619 * @adapter: board private structure 2620 * @max_tx_rate: max Tx bw for a tc 2621 **/ 2622 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2623 u64 max_tx_rate) 2624 { 2625 int speed = 0, ret = 0; 2626 2627 if (ADV_LINK_SUPPORT(adapter)) { 2628 if (adapter->link_speed_mbps < U32_MAX) { 2629 speed = adapter->link_speed_mbps; 2630 goto validate_bw; 2631 } else { 2632 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2633 return -EINVAL; 2634 } 2635 } 2636 2637 switch (adapter->link_speed) { 2638 case VIRTCHNL_LINK_SPEED_40GB: 2639 speed = SPEED_40000; 2640 break; 2641 case VIRTCHNL_LINK_SPEED_25GB: 2642 speed = SPEED_25000; 2643 break; 2644 case VIRTCHNL_LINK_SPEED_20GB: 2645 speed = SPEED_20000; 2646 break; 2647 case VIRTCHNL_LINK_SPEED_10GB: 2648 speed = SPEED_10000; 2649 break; 2650 case VIRTCHNL_LINK_SPEED_5GB: 2651 speed = SPEED_5000; 2652 break; 2653 case VIRTCHNL_LINK_SPEED_2_5GB: 2654 speed = SPEED_2500; 2655 break; 2656 case VIRTCHNL_LINK_SPEED_1GB: 2657 speed = SPEED_1000; 2658 break; 2659 case VIRTCHNL_LINK_SPEED_100MB: 2660 speed = SPEED_100; 2661 break; 2662 default: 2663 break; 2664 } 2665 2666 validate_bw: 2667 if (max_tx_rate > speed) { 2668 dev_err(&adapter->pdev->dev, 2669 "Invalid tx rate specified\n"); 2670 ret = -EINVAL; 2671 } 2672 2673 return ret; 2674 } 2675 2676 /** 2677 * iavf_validate_ch_config - validate queue mapping info 2678 * @adapter: board private structure 2679 * @mqprio_qopt: queue parameters 2680 * 2681 * This function validates if the config provided by the user to 2682 * configure queue channels is valid or not. Returns 0 on a valid 2683 * config. 2684 **/ 2685 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2686 struct tc_mqprio_qopt_offload *mqprio_qopt) 2687 { 2688 u64 total_max_rate = 0; 2689 int i, num_qps = 0; 2690 u64 tx_rate = 0; 2691 int ret = 0; 2692 2693 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2694 mqprio_qopt->qopt.num_tc < 1) 2695 return -EINVAL; 2696 2697 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2698 if (!mqprio_qopt->qopt.count[i] || 2699 mqprio_qopt->qopt.offset[i] != num_qps) 2700 return -EINVAL; 2701 if (mqprio_qopt->min_rate[i]) { 2702 dev_err(&adapter->pdev->dev, 2703 "Invalid min tx rate (greater than 0) specified\n"); 2704 return -EINVAL; 2705 } 2706 /*convert to Mbps */ 2707 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2708 IAVF_MBPS_DIVISOR); 2709 total_max_rate += tx_rate; 2710 num_qps += mqprio_qopt->qopt.count[i]; 2711 } 2712 if (num_qps > IAVF_MAX_REQ_QUEUES) 2713 return -EINVAL; 2714 2715 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2716 return ret; 2717 } 2718 2719 /** 2720 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2721 * @adapter: board private structure 2722 **/ 2723 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2724 { 2725 struct iavf_cloud_filter *cf, *cftmp; 2726 2727 spin_lock_bh(&adapter->cloud_filter_list_lock); 2728 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2729 list) { 2730 list_del(&cf->list); 2731 kfree(cf); 2732 adapter->num_cloud_filters--; 2733 } 2734 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2735 } 2736 2737 /** 2738 * __iavf_setup_tc - configure multiple traffic classes 2739 * @netdev: network interface device structure 2740 * @type_data: tc offload data 2741 * 2742 * This function processes the config information provided by the 2743 * user to configure traffic classes/queue channels and packages the 2744 * information to request the PF to setup traffic classes. 2745 * 2746 * Returns 0 on success. 2747 **/ 2748 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2749 { 2750 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2751 struct iavf_adapter *adapter = netdev_priv(netdev); 2752 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2753 u8 num_tc = 0, total_qps = 0; 2754 int ret = 0, netdev_tc = 0; 2755 u64 max_tx_rate; 2756 u16 mode; 2757 int i; 2758 2759 num_tc = mqprio_qopt->qopt.num_tc; 2760 mode = mqprio_qopt->mode; 2761 2762 /* delete queue_channel */ 2763 if (!mqprio_qopt->qopt.hw) { 2764 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2765 /* reset the tc configuration */ 2766 netdev_reset_tc(netdev); 2767 adapter->num_tc = 0; 2768 netif_tx_stop_all_queues(netdev); 2769 netif_tx_disable(netdev); 2770 iavf_del_all_cloud_filters(adapter); 2771 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2772 goto exit; 2773 } else { 2774 return -EINVAL; 2775 } 2776 } 2777 2778 /* add queue channel */ 2779 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2780 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2781 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2782 return -EOPNOTSUPP; 2783 } 2784 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2785 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2786 return -EINVAL; 2787 } 2788 2789 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2790 if (ret) 2791 return ret; 2792 /* Return if same TC config is requested */ 2793 if (adapter->num_tc == num_tc) 2794 return 0; 2795 adapter->num_tc = num_tc; 2796 2797 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2798 if (i < num_tc) { 2799 adapter->ch_config.ch_info[i].count = 2800 mqprio_qopt->qopt.count[i]; 2801 adapter->ch_config.ch_info[i].offset = 2802 mqprio_qopt->qopt.offset[i]; 2803 total_qps += mqprio_qopt->qopt.count[i]; 2804 max_tx_rate = mqprio_qopt->max_rate[i]; 2805 /* convert to Mbps */ 2806 max_tx_rate = div_u64(max_tx_rate, 2807 IAVF_MBPS_DIVISOR); 2808 adapter->ch_config.ch_info[i].max_tx_rate = 2809 max_tx_rate; 2810 } else { 2811 adapter->ch_config.ch_info[i].count = 1; 2812 adapter->ch_config.ch_info[i].offset = 0; 2813 } 2814 } 2815 adapter->ch_config.total_qps = total_qps; 2816 netif_tx_stop_all_queues(netdev); 2817 netif_tx_disable(netdev); 2818 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2819 netdev_reset_tc(netdev); 2820 /* Report the tc mapping up the stack */ 2821 netdev_set_num_tc(adapter->netdev, num_tc); 2822 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2823 u16 qcount = mqprio_qopt->qopt.count[i]; 2824 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2825 2826 if (i < num_tc) 2827 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2828 qoffset); 2829 } 2830 } 2831 exit: 2832 return ret; 2833 } 2834 2835 /** 2836 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2837 * @adapter: board private structure 2838 * @f: pointer to struct flow_cls_offload 2839 * @filter: pointer to cloud filter structure 2840 */ 2841 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2842 struct flow_cls_offload *f, 2843 struct iavf_cloud_filter *filter) 2844 { 2845 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2846 struct flow_dissector *dissector = rule->match.dissector; 2847 u16 n_proto_mask = 0; 2848 u16 n_proto_key = 0; 2849 u8 field_flags = 0; 2850 u16 addr_type = 0; 2851 u16 n_proto = 0; 2852 int i = 0; 2853 struct virtchnl_filter *vf = &filter->f; 2854 2855 if (dissector->used_keys & 2856 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2857 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2858 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2859 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2860 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2861 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2862 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2863 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2864 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2865 dissector->used_keys); 2866 return -EOPNOTSUPP; 2867 } 2868 2869 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2870 struct flow_match_enc_keyid match; 2871 2872 flow_rule_match_enc_keyid(rule, &match); 2873 if (match.mask->keyid != 0) 2874 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2875 } 2876 2877 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2878 struct flow_match_basic match; 2879 2880 flow_rule_match_basic(rule, &match); 2881 n_proto_key = ntohs(match.key->n_proto); 2882 n_proto_mask = ntohs(match.mask->n_proto); 2883 2884 if (n_proto_key == ETH_P_ALL) { 2885 n_proto_key = 0; 2886 n_proto_mask = 0; 2887 } 2888 n_proto = n_proto_key & n_proto_mask; 2889 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2890 return -EINVAL; 2891 if (n_proto == ETH_P_IPV6) { 2892 /* specify flow type as TCP IPv6 */ 2893 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2894 } 2895 2896 if (match.key->ip_proto != IPPROTO_TCP) { 2897 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2898 return -EINVAL; 2899 } 2900 } 2901 2902 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2903 struct flow_match_eth_addrs match; 2904 2905 flow_rule_match_eth_addrs(rule, &match); 2906 2907 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2908 if (!is_zero_ether_addr(match.mask->dst)) { 2909 if (is_broadcast_ether_addr(match.mask->dst)) { 2910 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2911 } else { 2912 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2913 match.mask->dst); 2914 return IAVF_ERR_CONFIG; 2915 } 2916 } 2917 2918 if (!is_zero_ether_addr(match.mask->src)) { 2919 if (is_broadcast_ether_addr(match.mask->src)) { 2920 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2921 } else { 2922 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2923 match.mask->src); 2924 return IAVF_ERR_CONFIG; 2925 } 2926 } 2927 2928 if (!is_zero_ether_addr(match.key->dst)) 2929 if (is_valid_ether_addr(match.key->dst) || 2930 is_multicast_ether_addr(match.key->dst)) { 2931 /* set the mask if a valid dst_mac address */ 2932 for (i = 0; i < ETH_ALEN; i++) 2933 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2934 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2935 match.key->dst); 2936 } 2937 2938 if (!is_zero_ether_addr(match.key->src)) 2939 if (is_valid_ether_addr(match.key->src) || 2940 is_multicast_ether_addr(match.key->src)) { 2941 /* set the mask if a valid dst_mac address */ 2942 for (i = 0; i < ETH_ALEN; i++) 2943 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2944 ether_addr_copy(vf->data.tcp_spec.src_mac, 2945 match.key->src); 2946 } 2947 } 2948 2949 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2950 struct flow_match_vlan match; 2951 2952 flow_rule_match_vlan(rule, &match); 2953 if (match.mask->vlan_id) { 2954 if (match.mask->vlan_id == VLAN_VID_MASK) { 2955 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2956 } else { 2957 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2958 match.mask->vlan_id); 2959 return IAVF_ERR_CONFIG; 2960 } 2961 } 2962 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2963 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2964 } 2965 2966 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2967 struct flow_match_control match; 2968 2969 flow_rule_match_control(rule, &match); 2970 addr_type = match.key->addr_type; 2971 } 2972 2973 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2974 struct flow_match_ipv4_addrs match; 2975 2976 flow_rule_match_ipv4_addrs(rule, &match); 2977 if (match.mask->dst) { 2978 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2979 field_flags |= IAVF_CLOUD_FIELD_IIP; 2980 } else { 2981 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2982 be32_to_cpu(match.mask->dst)); 2983 return IAVF_ERR_CONFIG; 2984 } 2985 } 2986 2987 if (match.mask->src) { 2988 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2989 field_flags |= IAVF_CLOUD_FIELD_IIP; 2990 } else { 2991 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2992 be32_to_cpu(match.mask->dst)); 2993 return IAVF_ERR_CONFIG; 2994 } 2995 } 2996 2997 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 2998 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2999 return IAVF_ERR_CONFIG; 3000 } 3001 if (match.key->dst) { 3002 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 3003 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 3004 } 3005 if (match.key->src) { 3006 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 3007 vf->data.tcp_spec.src_ip[0] = match.key->src; 3008 } 3009 } 3010 3011 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 3012 struct flow_match_ipv6_addrs match; 3013 3014 flow_rule_match_ipv6_addrs(rule, &match); 3015 3016 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 3017 if (ipv6_addr_any(&match.mask->dst)) { 3018 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 3019 IPV6_ADDR_ANY); 3020 return IAVF_ERR_CONFIG; 3021 } 3022 3023 /* src and dest IPv6 address should not be LOOPBACK 3024 * (0:0:0:0:0:0:0:1) which can be represented as ::1 3025 */ 3026 if (ipv6_addr_loopback(&match.key->dst) || 3027 ipv6_addr_loopback(&match.key->src)) { 3028 dev_err(&adapter->pdev->dev, 3029 "ipv6 addr should not be loopback\n"); 3030 return IAVF_ERR_CONFIG; 3031 } 3032 if (!ipv6_addr_any(&match.mask->dst) || 3033 !ipv6_addr_any(&match.mask->src)) 3034 field_flags |= IAVF_CLOUD_FIELD_IIP; 3035 3036 for (i = 0; i < 4; i++) 3037 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 3038 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 3039 sizeof(vf->data.tcp_spec.dst_ip)); 3040 for (i = 0; i < 4; i++) 3041 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 3042 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 3043 sizeof(vf->data.tcp_spec.src_ip)); 3044 } 3045 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 3046 struct flow_match_ports match; 3047 3048 flow_rule_match_ports(rule, &match); 3049 if (match.mask->src) { 3050 if (match.mask->src == cpu_to_be16(0xffff)) { 3051 field_flags |= IAVF_CLOUD_FIELD_IIP; 3052 } else { 3053 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 3054 be16_to_cpu(match.mask->src)); 3055 return IAVF_ERR_CONFIG; 3056 } 3057 } 3058 3059 if (match.mask->dst) { 3060 if (match.mask->dst == cpu_to_be16(0xffff)) { 3061 field_flags |= IAVF_CLOUD_FIELD_IIP; 3062 } else { 3063 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 3064 be16_to_cpu(match.mask->dst)); 3065 return IAVF_ERR_CONFIG; 3066 } 3067 } 3068 if (match.key->dst) { 3069 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 3070 vf->data.tcp_spec.dst_port = match.key->dst; 3071 } 3072 3073 if (match.key->src) { 3074 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 3075 vf->data.tcp_spec.src_port = match.key->src; 3076 } 3077 } 3078 vf->field_flags = field_flags; 3079 3080 return 0; 3081 } 3082 3083 /** 3084 * iavf_handle_tclass - Forward to a traffic class on the device 3085 * @adapter: board private structure 3086 * @tc: traffic class index on the device 3087 * @filter: pointer to cloud filter structure 3088 */ 3089 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3090 struct iavf_cloud_filter *filter) 3091 { 3092 if (tc == 0) 3093 return 0; 3094 if (tc < adapter->num_tc) { 3095 if (!filter->f.data.tcp_spec.dst_port) { 3096 dev_err(&adapter->pdev->dev, 3097 "Specify destination port to redirect to traffic class other than TC0\n"); 3098 return -EINVAL; 3099 } 3100 } 3101 /* redirect to a traffic class on the same device */ 3102 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3103 filter->f.action_meta = tc; 3104 return 0; 3105 } 3106 3107 /** 3108 * iavf_configure_clsflower - Add tc flower filters 3109 * @adapter: board private structure 3110 * @cls_flower: Pointer to struct flow_cls_offload 3111 */ 3112 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3113 struct flow_cls_offload *cls_flower) 3114 { 3115 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3116 struct iavf_cloud_filter *filter = NULL; 3117 int err = -EINVAL, count = 50; 3118 3119 if (tc < 0) { 3120 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3121 return -EINVAL; 3122 } 3123 3124 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3125 if (!filter) 3126 return -ENOMEM; 3127 3128 while (!mutex_trylock(&adapter->crit_lock)) { 3129 if (--count == 0) { 3130 kfree(filter); 3131 return err; 3132 } 3133 udelay(1); 3134 } 3135 3136 filter->cookie = cls_flower->cookie; 3137 3138 /* set the mask to all zeroes to begin with */ 3139 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3140 /* start out with flow type and eth type IPv4 to begin with */ 3141 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3142 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3143 if (err) 3144 goto err; 3145 3146 err = iavf_handle_tclass(adapter, tc, filter); 3147 if (err) 3148 goto err; 3149 3150 /* add filter to the list */ 3151 spin_lock_bh(&adapter->cloud_filter_list_lock); 3152 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3153 adapter->num_cloud_filters++; 3154 filter->add = true; 3155 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3156 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3157 err: 3158 if (err) 3159 kfree(filter); 3160 3161 mutex_unlock(&adapter->crit_lock); 3162 return err; 3163 } 3164 3165 /* iavf_find_cf - Find the cloud filter in the list 3166 * @adapter: Board private structure 3167 * @cookie: filter specific cookie 3168 * 3169 * Returns ptr to the filter object or NULL. Must be called while holding the 3170 * cloud_filter_list_lock. 3171 */ 3172 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3173 unsigned long *cookie) 3174 { 3175 struct iavf_cloud_filter *filter = NULL; 3176 3177 if (!cookie) 3178 return NULL; 3179 3180 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3181 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3182 return filter; 3183 } 3184 return NULL; 3185 } 3186 3187 /** 3188 * iavf_delete_clsflower - Remove tc flower filters 3189 * @adapter: board private structure 3190 * @cls_flower: Pointer to struct flow_cls_offload 3191 */ 3192 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3193 struct flow_cls_offload *cls_flower) 3194 { 3195 struct iavf_cloud_filter *filter = NULL; 3196 int err = 0; 3197 3198 spin_lock_bh(&adapter->cloud_filter_list_lock); 3199 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3200 if (filter) { 3201 filter->del = true; 3202 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3203 } else { 3204 err = -EINVAL; 3205 } 3206 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3207 3208 return err; 3209 } 3210 3211 /** 3212 * iavf_setup_tc_cls_flower - flower classifier offloads 3213 * @adapter: board private structure 3214 * @cls_flower: pointer to flow_cls_offload struct with flow info 3215 */ 3216 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3217 struct flow_cls_offload *cls_flower) 3218 { 3219 switch (cls_flower->command) { 3220 case FLOW_CLS_REPLACE: 3221 return iavf_configure_clsflower(adapter, cls_flower); 3222 case FLOW_CLS_DESTROY: 3223 return iavf_delete_clsflower(adapter, cls_flower); 3224 case FLOW_CLS_STATS: 3225 return -EOPNOTSUPP; 3226 default: 3227 return -EOPNOTSUPP; 3228 } 3229 } 3230 3231 /** 3232 * iavf_setup_tc_block_cb - block callback for tc 3233 * @type: type of offload 3234 * @type_data: offload data 3235 * @cb_priv: 3236 * 3237 * This function is the block callback for traffic classes 3238 **/ 3239 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3240 void *cb_priv) 3241 { 3242 struct iavf_adapter *adapter = cb_priv; 3243 3244 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3245 return -EOPNOTSUPP; 3246 3247 switch (type) { 3248 case TC_SETUP_CLSFLOWER: 3249 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3250 default: 3251 return -EOPNOTSUPP; 3252 } 3253 } 3254 3255 static LIST_HEAD(iavf_block_cb_list); 3256 3257 /** 3258 * iavf_setup_tc - configure multiple traffic classes 3259 * @netdev: network interface device structure 3260 * @type: type of offload 3261 * @type_data: tc offload data 3262 * 3263 * This function is the callback to ndo_setup_tc in the 3264 * netdev_ops. 3265 * 3266 * Returns 0 on success 3267 **/ 3268 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3269 void *type_data) 3270 { 3271 struct iavf_adapter *adapter = netdev_priv(netdev); 3272 3273 switch (type) { 3274 case TC_SETUP_QDISC_MQPRIO: 3275 return __iavf_setup_tc(netdev, type_data); 3276 case TC_SETUP_BLOCK: 3277 return flow_block_cb_setup_simple(type_data, 3278 &iavf_block_cb_list, 3279 iavf_setup_tc_block_cb, 3280 adapter, adapter, true); 3281 default: 3282 return -EOPNOTSUPP; 3283 } 3284 } 3285 3286 /** 3287 * iavf_open - Called when a network interface is made active 3288 * @netdev: network interface device structure 3289 * 3290 * Returns 0 on success, negative value on failure 3291 * 3292 * The open entry point is called when a network interface is made 3293 * active by the system (IFF_UP). At this point all resources needed 3294 * for transmit and receive operations are allocated, the interrupt 3295 * handler is registered with the OS, the watchdog is started, 3296 * and the stack is notified that the interface is ready. 3297 **/ 3298 static int iavf_open(struct net_device *netdev) 3299 { 3300 struct iavf_adapter *adapter = netdev_priv(netdev); 3301 int err; 3302 3303 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3304 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3305 return -EIO; 3306 } 3307 3308 while (!mutex_trylock(&adapter->crit_lock)) 3309 usleep_range(500, 1000); 3310 3311 if (adapter->state != __IAVF_DOWN) { 3312 err = -EBUSY; 3313 goto err_unlock; 3314 } 3315 3316 if (adapter->state == __IAVF_RUNNING && 3317 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 3318 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 3319 err = 0; 3320 goto err_unlock; 3321 } 3322 3323 /* allocate transmit descriptors */ 3324 err = iavf_setup_all_tx_resources(adapter); 3325 if (err) 3326 goto err_setup_tx; 3327 3328 /* allocate receive descriptors */ 3329 err = iavf_setup_all_rx_resources(adapter); 3330 if (err) 3331 goto err_setup_rx; 3332 3333 /* clear any pending interrupts, may auto mask */ 3334 err = iavf_request_traffic_irqs(adapter, netdev->name); 3335 if (err) 3336 goto err_req_irq; 3337 3338 spin_lock_bh(&adapter->mac_vlan_list_lock); 3339 3340 iavf_add_filter(adapter, adapter->hw.mac.addr); 3341 3342 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3343 3344 /* Restore VLAN filters that were removed with IFF_DOWN */ 3345 iavf_restore_filters(adapter); 3346 3347 iavf_configure(adapter); 3348 3349 iavf_up_complete(adapter); 3350 3351 iavf_irq_enable(adapter, true); 3352 3353 mutex_unlock(&adapter->crit_lock); 3354 3355 return 0; 3356 3357 err_req_irq: 3358 iavf_down(adapter); 3359 iavf_free_traffic_irqs(adapter); 3360 err_setup_rx: 3361 iavf_free_all_rx_resources(adapter); 3362 err_setup_tx: 3363 iavf_free_all_tx_resources(adapter); 3364 err_unlock: 3365 mutex_unlock(&adapter->crit_lock); 3366 3367 return err; 3368 } 3369 3370 /** 3371 * iavf_close - Disables a network interface 3372 * @netdev: network interface device structure 3373 * 3374 * Returns 0, this is not allowed to fail 3375 * 3376 * The close entry point is called when an interface is de-activated 3377 * by the OS. The hardware is still under the drivers control, but 3378 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3379 * are freed, along with all transmit and receive resources. 3380 **/ 3381 static int iavf_close(struct net_device *netdev) 3382 { 3383 struct iavf_adapter *adapter = netdev_priv(netdev); 3384 int status; 3385 3386 if (adapter->state <= __IAVF_DOWN_PENDING) 3387 return 0; 3388 3389 while (!mutex_trylock(&adapter->crit_lock)) 3390 usleep_range(500, 1000); 3391 3392 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3393 if (CLIENT_ENABLED(adapter)) 3394 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3395 3396 iavf_down(adapter); 3397 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 3398 iavf_free_traffic_irqs(adapter); 3399 3400 mutex_unlock(&adapter->crit_lock); 3401 3402 /* We explicitly don't free resources here because the hardware is 3403 * still active and can DMA into memory. Resources are cleared in 3404 * iavf_virtchnl_completion() after we get confirmation from the PF 3405 * driver that the rings have been stopped. 3406 * 3407 * Also, we wait for state to transition to __IAVF_DOWN before 3408 * returning. State change occurs in iavf_virtchnl_completion() after 3409 * VF resources are released (which occurs after PF driver processes and 3410 * responds to admin queue commands). 3411 */ 3412 3413 status = wait_event_timeout(adapter->down_waitqueue, 3414 adapter->state == __IAVF_DOWN, 3415 msecs_to_jiffies(500)); 3416 if (!status) 3417 netdev_warn(netdev, "Device resources not yet released\n"); 3418 return 0; 3419 } 3420 3421 /** 3422 * iavf_change_mtu - Change the Maximum Transfer Unit 3423 * @netdev: network interface device structure 3424 * @new_mtu: new value for maximum frame size 3425 * 3426 * Returns 0 on success, negative on failure 3427 **/ 3428 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3429 { 3430 struct iavf_adapter *adapter = netdev_priv(netdev); 3431 3432 netdev->mtu = new_mtu; 3433 if (CLIENT_ENABLED(adapter)) { 3434 iavf_notify_client_l2_params(&adapter->vsi); 3435 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3436 } 3437 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3438 queue_work(iavf_wq, &adapter->reset_task); 3439 3440 return 0; 3441 } 3442 3443 /** 3444 * iavf_set_features - set the netdev feature flags 3445 * @netdev: ptr to the netdev being adjusted 3446 * @features: the feature set that the stack is suggesting 3447 * Note: expects to be called while under rtnl_lock() 3448 **/ 3449 static int iavf_set_features(struct net_device *netdev, 3450 netdev_features_t features) 3451 { 3452 struct iavf_adapter *adapter = netdev_priv(netdev); 3453 3454 /* Don't allow enabling VLAN features when adapter is not capable 3455 * of VLAN offload/filtering 3456 */ 3457 if (!VLAN_ALLOWED(adapter)) { 3458 netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 3459 NETIF_F_HW_VLAN_CTAG_TX | 3460 NETIF_F_HW_VLAN_CTAG_FILTER); 3461 if (features & (NETIF_F_HW_VLAN_CTAG_RX | 3462 NETIF_F_HW_VLAN_CTAG_TX | 3463 NETIF_F_HW_VLAN_CTAG_FILTER)) 3464 return -EINVAL; 3465 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3466 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3467 adapter->aq_required |= 3468 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3469 else 3470 adapter->aq_required |= 3471 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3472 } 3473 3474 return 0; 3475 } 3476 3477 /** 3478 * iavf_features_check - Validate encapsulated packet conforms to limits 3479 * @skb: skb buff 3480 * @dev: This physical port's netdev 3481 * @features: Offload features that the stack believes apply 3482 **/ 3483 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3484 struct net_device *dev, 3485 netdev_features_t features) 3486 { 3487 size_t len; 3488 3489 /* No point in doing any of this if neither checksum nor GSO are 3490 * being requested for this frame. We can rule out both by just 3491 * checking for CHECKSUM_PARTIAL 3492 */ 3493 if (skb->ip_summed != CHECKSUM_PARTIAL) 3494 return features; 3495 3496 /* We cannot support GSO if the MSS is going to be less than 3497 * 64 bytes. If it is then we need to drop support for GSO. 3498 */ 3499 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3500 features &= ~NETIF_F_GSO_MASK; 3501 3502 /* MACLEN can support at most 63 words */ 3503 len = skb_network_header(skb) - skb->data; 3504 if (len & ~(63 * 2)) 3505 goto out_err; 3506 3507 /* IPLEN and EIPLEN can support at most 127 dwords */ 3508 len = skb_transport_header(skb) - skb_network_header(skb); 3509 if (len & ~(127 * 4)) 3510 goto out_err; 3511 3512 if (skb->encapsulation) { 3513 /* L4TUNLEN can support 127 words */ 3514 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3515 if (len & ~(127 * 2)) 3516 goto out_err; 3517 3518 /* IPLEN can support at most 127 dwords */ 3519 len = skb_inner_transport_header(skb) - 3520 skb_inner_network_header(skb); 3521 if (len & ~(127 * 4)) 3522 goto out_err; 3523 } 3524 3525 /* No need to validate L4LEN as TCP is the only protocol with a 3526 * a flexible value and we support all possible values supported 3527 * by TCP, which is at most 15 dwords 3528 */ 3529 3530 return features; 3531 out_err: 3532 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3533 } 3534 3535 /** 3536 * iavf_fix_features - fix up the netdev feature bits 3537 * @netdev: our net device 3538 * @features: desired feature bits 3539 * 3540 * Returns fixed-up features bits 3541 **/ 3542 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3543 netdev_features_t features) 3544 { 3545 struct iavf_adapter *adapter = netdev_priv(netdev); 3546 3547 if (adapter->vf_res && 3548 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3549 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3550 NETIF_F_HW_VLAN_CTAG_RX | 3551 NETIF_F_HW_VLAN_CTAG_FILTER); 3552 3553 return features; 3554 } 3555 3556 static const struct net_device_ops iavf_netdev_ops = { 3557 .ndo_open = iavf_open, 3558 .ndo_stop = iavf_close, 3559 .ndo_start_xmit = iavf_xmit_frame, 3560 .ndo_set_rx_mode = iavf_set_rx_mode, 3561 .ndo_validate_addr = eth_validate_addr, 3562 .ndo_set_mac_address = iavf_set_mac, 3563 .ndo_change_mtu = iavf_change_mtu, 3564 .ndo_tx_timeout = iavf_tx_timeout, 3565 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3566 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3567 .ndo_features_check = iavf_features_check, 3568 .ndo_fix_features = iavf_fix_features, 3569 .ndo_set_features = iavf_set_features, 3570 .ndo_setup_tc = iavf_setup_tc, 3571 }; 3572 3573 /** 3574 * iavf_check_reset_complete - check that VF reset is complete 3575 * @hw: pointer to hw struct 3576 * 3577 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3578 **/ 3579 static int iavf_check_reset_complete(struct iavf_hw *hw) 3580 { 3581 u32 rstat; 3582 int i; 3583 3584 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3585 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3586 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3587 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3588 (rstat == VIRTCHNL_VFR_COMPLETED)) 3589 return 0; 3590 usleep_range(10, 20); 3591 } 3592 return -EBUSY; 3593 } 3594 3595 /** 3596 * iavf_process_config - Process the config information we got from the PF 3597 * @adapter: board private structure 3598 * 3599 * Verify that we have a valid config struct, and set up our netdev features 3600 * and our VSI struct. 3601 **/ 3602 int iavf_process_config(struct iavf_adapter *adapter) 3603 { 3604 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3605 int i, num_req_queues = adapter->num_req_queues; 3606 struct net_device *netdev = adapter->netdev; 3607 struct iavf_vsi *vsi = &adapter->vsi; 3608 netdev_features_t hw_enc_features; 3609 netdev_features_t hw_features; 3610 3611 /* got VF config message back from PF, now we can parse it */ 3612 for (i = 0; i < vfres->num_vsis; i++) { 3613 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3614 adapter->vsi_res = &vfres->vsi_res[i]; 3615 } 3616 if (!adapter->vsi_res) { 3617 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3618 return -ENODEV; 3619 } 3620 3621 if (num_req_queues && 3622 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3623 /* Problem. The PF gave us fewer queues than what we had 3624 * negotiated in our request. Need a reset to see if we can't 3625 * get back to a working state. 3626 */ 3627 dev_err(&adapter->pdev->dev, 3628 "Requested %d queues, but PF only gave us %d.\n", 3629 num_req_queues, 3630 adapter->vsi_res->num_queue_pairs); 3631 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3632 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3633 iavf_schedule_reset(adapter); 3634 return -ENODEV; 3635 } 3636 adapter->num_req_queues = 0; 3637 3638 hw_enc_features = NETIF_F_SG | 3639 NETIF_F_IP_CSUM | 3640 NETIF_F_IPV6_CSUM | 3641 NETIF_F_HIGHDMA | 3642 NETIF_F_SOFT_FEATURES | 3643 NETIF_F_TSO | 3644 NETIF_F_TSO_ECN | 3645 NETIF_F_TSO6 | 3646 NETIF_F_SCTP_CRC | 3647 NETIF_F_RXHASH | 3648 NETIF_F_RXCSUM | 3649 0; 3650 3651 /* advertise to stack only if offloads for encapsulated packets is 3652 * supported 3653 */ 3654 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3655 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3656 NETIF_F_GSO_GRE | 3657 NETIF_F_GSO_GRE_CSUM | 3658 NETIF_F_GSO_IPXIP4 | 3659 NETIF_F_GSO_IPXIP6 | 3660 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3661 NETIF_F_GSO_PARTIAL | 3662 0; 3663 3664 if (!(vfres->vf_cap_flags & 3665 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3666 netdev->gso_partial_features |= 3667 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3668 3669 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3670 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3671 netdev->hw_enc_features |= hw_enc_features; 3672 } 3673 /* record features VLANs can make use of */ 3674 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3675 3676 /* Write features and hw_features separately to avoid polluting 3677 * with, or dropping, features that are set when we registered. 3678 */ 3679 hw_features = hw_enc_features; 3680 3681 /* Enable VLAN features if supported */ 3682 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3683 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3684 NETIF_F_HW_VLAN_CTAG_RX); 3685 /* Enable cloud filter if ADQ is supported */ 3686 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3687 hw_features |= NETIF_F_HW_TC; 3688 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3689 hw_features |= NETIF_F_GSO_UDP_L4; 3690 3691 netdev->hw_features |= hw_features; 3692 3693 netdev->features |= hw_features; 3694 3695 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3696 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3697 3698 netdev->priv_flags |= IFF_UNICAST_FLT; 3699 3700 /* Do not turn on offloads when they are requested to be turned off. 3701 * TSO needs minimum 576 bytes to work correctly. 3702 */ 3703 if (netdev->wanted_features) { 3704 if (!(netdev->wanted_features & NETIF_F_TSO) || 3705 netdev->mtu < 576) 3706 netdev->features &= ~NETIF_F_TSO; 3707 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3708 netdev->mtu < 576) 3709 netdev->features &= ~NETIF_F_TSO6; 3710 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3711 netdev->features &= ~NETIF_F_TSO_ECN; 3712 if (!(netdev->wanted_features & NETIF_F_GRO)) 3713 netdev->features &= ~NETIF_F_GRO; 3714 if (!(netdev->wanted_features & NETIF_F_GSO)) 3715 netdev->features &= ~NETIF_F_GSO; 3716 } 3717 3718 adapter->vsi.id = adapter->vsi_res->vsi_id; 3719 3720 adapter->vsi.back = adapter; 3721 adapter->vsi.base_vector = 1; 3722 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3723 vsi->netdev = adapter->netdev; 3724 vsi->qs_handle = adapter->vsi_res->qset_handle; 3725 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3726 adapter->rss_key_size = vfres->rss_key_size; 3727 adapter->rss_lut_size = vfres->rss_lut_size; 3728 } else { 3729 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3730 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3731 } 3732 3733 return 0; 3734 } 3735 3736 /** 3737 * iavf_shutdown - Shutdown the device in preparation for a reboot 3738 * @pdev: pci device structure 3739 **/ 3740 static void iavf_shutdown(struct pci_dev *pdev) 3741 { 3742 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 3743 struct net_device *netdev = adapter->netdev; 3744 3745 netif_device_detach(netdev); 3746 3747 if (netif_running(netdev)) 3748 iavf_close(netdev); 3749 3750 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 3751 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 3752 /* Prevent the watchdog from running. */ 3753 iavf_change_state(adapter, __IAVF_REMOVE); 3754 adapter->aq_required = 0; 3755 mutex_unlock(&adapter->crit_lock); 3756 3757 #ifdef CONFIG_PM 3758 pci_save_state(pdev); 3759 3760 #endif 3761 pci_disable_device(pdev); 3762 } 3763 3764 /** 3765 * iavf_probe - Device Initialization Routine 3766 * @pdev: PCI device information struct 3767 * @ent: entry in iavf_pci_tbl 3768 * 3769 * Returns 0 on success, negative on failure 3770 * 3771 * iavf_probe initializes an adapter identified by a pci_dev structure. 3772 * The OS initialization, configuring of the adapter private structure, 3773 * and a hardware reset occur. 3774 **/ 3775 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3776 { 3777 struct net_device *netdev; 3778 struct iavf_adapter *adapter = NULL; 3779 struct iavf_hw *hw = NULL; 3780 int err; 3781 3782 err = pci_enable_device(pdev); 3783 if (err) 3784 return err; 3785 3786 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3787 if (err) { 3788 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3789 if (err) { 3790 dev_err(&pdev->dev, 3791 "DMA configuration failed: 0x%x\n", err); 3792 goto err_dma; 3793 } 3794 } 3795 3796 err = pci_request_regions(pdev, iavf_driver_name); 3797 if (err) { 3798 dev_err(&pdev->dev, 3799 "pci_request_regions failed 0x%x\n", err); 3800 goto err_pci_reg; 3801 } 3802 3803 pci_enable_pcie_error_reporting(pdev); 3804 3805 pci_set_master(pdev); 3806 3807 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3808 IAVF_MAX_REQ_QUEUES); 3809 if (!netdev) { 3810 err = -ENOMEM; 3811 goto err_alloc_etherdev; 3812 } 3813 3814 SET_NETDEV_DEV(netdev, &pdev->dev); 3815 3816 pci_set_drvdata(pdev, netdev); 3817 adapter = netdev_priv(netdev); 3818 3819 adapter->netdev = netdev; 3820 adapter->pdev = pdev; 3821 3822 hw = &adapter->hw; 3823 hw->back = adapter; 3824 3825 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3826 iavf_change_state(adapter, __IAVF_STARTUP); 3827 3828 /* Call save state here because it relies on the adapter struct. */ 3829 pci_save_state(pdev); 3830 3831 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3832 pci_resource_len(pdev, 0)); 3833 if (!hw->hw_addr) { 3834 err = -EIO; 3835 goto err_ioremap; 3836 } 3837 hw->vendor_id = pdev->vendor; 3838 hw->device_id = pdev->device; 3839 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 3840 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3841 hw->subsystem_device_id = pdev->subsystem_device; 3842 hw->bus.device = PCI_SLOT(pdev->devfn); 3843 hw->bus.func = PCI_FUNC(pdev->devfn); 3844 hw->bus.bus_id = pdev->bus->number; 3845 3846 /* set up the locks for the AQ, do this only once in probe 3847 * and destroy them only once in remove 3848 */ 3849 mutex_init(&adapter->crit_lock); 3850 mutex_init(&adapter->client_lock); 3851 mutex_init(&adapter->remove_lock); 3852 mutex_init(&hw->aq.asq_mutex); 3853 mutex_init(&hw->aq.arq_mutex); 3854 3855 spin_lock_init(&adapter->mac_vlan_list_lock); 3856 spin_lock_init(&adapter->cloud_filter_list_lock); 3857 spin_lock_init(&adapter->fdir_fltr_lock); 3858 spin_lock_init(&adapter->adv_rss_lock); 3859 3860 INIT_LIST_HEAD(&adapter->mac_filter_list); 3861 INIT_LIST_HEAD(&adapter->vlan_filter_list); 3862 INIT_LIST_HEAD(&adapter->cloud_filter_list); 3863 INIT_LIST_HEAD(&adapter->fdir_list_head); 3864 INIT_LIST_HEAD(&adapter->adv_rss_list_head); 3865 3866 INIT_WORK(&adapter->reset_task, iavf_reset_task); 3867 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 3868 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 3869 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 3870 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 3871 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 3872 3873 /* Setup the wait queue for indicating transition to down status */ 3874 init_waitqueue_head(&adapter->down_waitqueue); 3875 3876 return 0; 3877 3878 err_ioremap: 3879 free_netdev(netdev); 3880 err_alloc_etherdev: 3881 pci_disable_pcie_error_reporting(pdev); 3882 pci_release_regions(pdev); 3883 err_pci_reg: 3884 err_dma: 3885 pci_disable_device(pdev); 3886 return err; 3887 } 3888 3889 /** 3890 * iavf_suspend - Power management suspend routine 3891 * @dev_d: device info pointer 3892 * 3893 * Called when the system (VM) is entering sleep/suspend. 3894 **/ 3895 static int __maybe_unused iavf_suspend(struct device *dev_d) 3896 { 3897 struct net_device *netdev = dev_get_drvdata(dev_d); 3898 struct iavf_adapter *adapter = netdev_priv(netdev); 3899 3900 netif_device_detach(netdev); 3901 3902 while (!mutex_trylock(&adapter->crit_lock)) 3903 usleep_range(500, 1000); 3904 3905 if (netif_running(netdev)) { 3906 rtnl_lock(); 3907 iavf_down(adapter); 3908 rtnl_unlock(); 3909 } 3910 iavf_free_misc_irq(adapter); 3911 iavf_reset_interrupt_capability(adapter); 3912 3913 mutex_unlock(&adapter->crit_lock); 3914 3915 return 0; 3916 } 3917 3918 /** 3919 * iavf_resume - Power management resume routine 3920 * @dev_d: device info pointer 3921 * 3922 * Called when the system (VM) is resumed from sleep/suspend. 3923 **/ 3924 static int __maybe_unused iavf_resume(struct device *dev_d) 3925 { 3926 struct pci_dev *pdev = to_pci_dev(dev_d); 3927 struct iavf_adapter *adapter; 3928 u32 err; 3929 3930 adapter = iavf_pdev_to_adapter(pdev); 3931 3932 pci_set_master(pdev); 3933 3934 rtnl_lock(); 3935 err = iavf_set_interrupt_capability(adapter); 3936 if (err) { 3937 rtnl_unlock(); 3938 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); 3939 return err; 3940 } 3941 err = iavf_request_misc_irq(adapter); 3942 rtnl_unlock(); 3943 if (err) { 3944 dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); 3945 return err; 3946 } 3947 3948 queue_work(iavf_wq, &adapter->reset_task); 3949 3950 netif_device_attach(adapter->netdev); 3951 3952 return err; 3953 } 3954 3955 /** 3956 * iavf_remove - Device Removal Routine 3957 * @pdev: PCI device information struct 3958 * 3959 * iavf_remove is called by the PCI subsystem to alert the driver 3960 * that it should release a PCI device. The could be caused by a 3961 * Hot-Plug event, or because the driver is going to be removed from 3962 * memory. 3963 **/ 3964 static void iavf_remove(struct pci_dev *pdev) 3965 { 3966 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 3967 enum iavf_state_t prev_state = adapter->last_state; 3968 struct net_device *netdev = adapter->netdev; 3969 struct iavf_fdir_fltr *fdir, *fdirtmp; 3970 struct iavf_vlan_filter *vlf, *vlftmp; 3971 struct iavf_adv_rss *rss, *rsstmp; 3972 struct iavf_mac_filter *f, *ftmp; 3973 struct iavf_cloud_filter *cf, *cftmp; 3974 struct iavf_hw *hw = &adapter->hw; 3975 int err; 3976 /* Indicate we are in remove and not to run reset_task */ 3977 mutex_lock(&adapter->remove_lock); 3978 cancel_work_sync(&adapter->reset_task); 3979 cancel_delayed_work_sync(&adapter->watchdog_task); 3980 cancel_delayed_work_sync(&adapter->client_task); 3981 if (adapter->netdev_registered) { 3982 unregister_netdev(netdev); 3983 adapter->netdev_registered = false; 3984 } 3985 if (CLIENT_ALLOWED(adapter)) { 3986 err = iavf_lan_del_device(adapter); 3987 if (err) 3988 dev_warn(&pdev->dev, "Failed to delete client device: %d\n", 3989 err); 3990 } 3991 3992 iavf_request_reset(adapter); 3993 msleep(50); 3994 /* If the FW isn't responding, kick it once, but only once. */ 3995 if (!iavf_asq_done(hw)) { 3996 iavf_request_reset(adapter); 3997 msleep(50); 3998 } 3999 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 4000 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 4001 4002 /* Shut down all the garbage mashers on the detention level */ 4003 iavf_change_state(adapter, __IAVF_REMOVE); 4004 adapter->aq_required = 0; 4005 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 4006 4007 iavf_free_all_tx_resources(adapter); 4008 iavf_free_all_rx_resources(adapter); 4009 iavf_misc_irq_disable(adapter); 4010 iavf_free_misc_irq(adapter); 4011 4012 /* In case we enter iavf_remove from erroneous state, free traffic irqs 4013 * here, so as to not cause a kernel crash, when calling 4014 * iavf_reset_interrupt_capability. 4015 */ 4016 if ((adapter->last_state == __IAVF_RESETTING && 4017 prev_state != __IAVF_DOWN) || 4018 (adapter->last_state == __IAVF_RUNNING && 4019 !(netdev->flags & IFF_UP))) 4020 iavf_free_traffic_irqs(adapter); 4021 4022 iavf_reset_interrupt_capability(adapter); 4023 iavf_free_q_vectors(adapter); 4024 4025 cancel_delayed_work_sync(&adapter->watchdog_task); 4026 4027 cancel_work_sync(&adapter->adminq_task); 4028 4029 iavf_free_rss(adapter); 4030 4031 if (hw->aq.asq.count) 4032 iavf_shutdown_adminq(hw); 4033 4034 /* destroy the locks only once, here */ 4035 mutex_destroy(&hw->aq.arq_mutex); 4036 mutex_destroy(&hw->aq.asq_mutex); 4037 mutex_destroy(&adapter->client_lock); 4038 mutex_unlock(&adapter->crit_lock); 4039 mutex_destroy(&adapter->crit_lock); 4040 mutex_unlock(&adapter->remove_lock); 4041 mutex_destroy(&adapter->remove_lock); 4042 4043 iounmap(hw->hw_addr); 4044 pci_release_regions(pdev); 4045 iavf_free_queues(adapter); 4046 kfree(adapter->vf_res); 4047 spin_lock_bh(&adapter->mac_vlan_list_lock); 4048 /* If we got removed before an up/down sequence, we've got a filter 4049 * hanging out there that we need to get rid of. 4050 */ 4051 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 4052 list_del(&f->list); 4053 kfree(f); 4054 } 4055 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, 4056 list) { 4057 list_del(&vlf->list); 4058 kfree(vlf); 4059 } 4060 4061 spin_unlock_bh(&adapter->mac_vlan_list_lock); 4062 4063 spin_lock_bh(&adapter->cloud_filter_list_lock); 4064 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 4065 list_del(&cf->list); 4066 kfree(cf); 4067 } 4068 spin_unlock_bh(&adapter->cloud_filter_list_lock); 4069 4070 spin_lock_bh(&adapter->fdir_fltr_lock); 4071 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { 4072 list_del(&fdir->list); 4073 kfree(fdir); 4074 } 4075 spin_unlock_bh(&adapter->fdir_fltr_lock); 4076 4077 spin_lock_bh(&adapter->adv_rss_lock); 4078 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, 4079 list) { 4080 list_del(&rss->list); 4081 kfree(rss); 4082 } 4083 spin_unlock_bh(&adapter->adv_rss_lock); 4084 4085 free_netdev(netdev); 4086 4087 pci_disable_pcie_error_reporting(pdev); 4088 4089 pci_disable_device(pdev); 4090 } 4091 4092 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); 4093 4094 static struct pci_driver iavf_driver = { 4095 .name = iavf_driver_name, 4096 .id_table = iavf_pci_tbl, 4097 .probe = iavf_probe, 4098 .remove = iavf_remove, 4099 .driver.pm = &iavf_pm_ops, 4100 .shutdown = iavf_shutdown, 4101 }; 4102 4103 /** 4104 * iavf_init_module - Driver Registration Routine 4105 * 4106 * iavf_init_module is the first routine called when the driver is 4107 * loaded. All it does is register with the PCI subsystem. 4108 **/ 4109 static int __init iavf_init_module(void) 4110 { 4111 int ret; 4112 4113 pr_info("iavf: %s\n", iavf_driver_string); 4114 4115 pr_info("%s\n", iavf_copyright); 4116 4117 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, 4118 iavf_driver_name); 4119 if (!iavf_wq) { 4120 pr_err("%s: Failed to create workqueue\n", iavf_driver_name); 4121 return -ENOMEM; 4122 } 4123 ret = pci_register_driver(&iavf_driver); 4124 return ret; 4125 } 4126 4127 module_init(iavf_init_module); 4128 4129 /** 4130 * iavf_exit_module - Driver Exit Cleanup Routine 4131 * 4132 * iavf_exit_module is called just before the driver is removed 4133 * from memory. 4134 **/ 4135 static void __exit iavf_exit_module(void) 4136 { 4137 pci_unregister_driver(&iavf_driver); 4138 destroy_workqueue(iavf_wq); 4139 } 4140 4141 module_exit(iavf_exit_module); 4142 4143 /* iavf_main.c */ 4144