// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
		iavf_add_vlan(adapter, vid);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;

	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;

	set_bit(vid, adapter->vsi.active_vlans);
	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, vid);
	clear_bit(vid, adapter->vsi.active_vlans);

	return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->is_new_mac = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_fdir_fltr *fdir;
	struct iavf_mac_filter *f;
	struct iavf_adv_rss *rss;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	/* remove all advanced RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
		rss->state = IAVF_ADV_RSS_DEL_REQUEST;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it this far with the ADq flag set, then we haven't
	 * bailed out anywhere in the middle. And ADq isn't just enabled but
	 * actual resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);
"Enabled" : "Disabled", 1515 adapter->num_active_queues); 1516 1517 return 0; 1518 err_alloc_q_vectors: 1519 iavf_reset_interrupt_capability(adapter); 1520 err_set_interrupt: 1521 iavf_free_queues(adapter); 1522 err_alloc_queues: 1523 return err; 1524 } 1525 1526 /** 1527 * iavf_free_rss - Free memory used by RSS structs 1528 * @adapter: board private structure 1529 **/ 1530 static void iavf_free_rss(struct iavf_adapter *adapter) 1531 { 1532 kfree(adapter->rss_key); 1533 adapter->rss_key = NULL; 1534 1535 kfree(adapter->rss_lut); 1536 adapter->rss_lut = NULL; 1537 } 1538 1539 /** 1540 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1541 * @adapter: board private structure 1542 * 1543 * Returns 0 on success, negative on failure 1544 **/ 1545 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1546 { 1547 struct net_device *netdev = adapter->netdev; 1548 int err; 1549 1550 if (netif_running(netdev)) 1551 iavf_free_traffic_irqs(adapter); 1552 iavf_free_misc_irq(adapter); 1553 iavf_reset_interrupt_capability(adapter); 1554 iavf_free_q_vectors(adapter); 1555 iavf_free_queues(adapter); 1556 1557 err = iavf_init_interrupt_scheme(adapter); 1558 if (err) 1559 goto err; 1560 1561 netif_tx_stop_all_queues(netdev); 1562 1563 err = iavf_request_misc_irq(adapter); 1564 if (err) 1565 goto err; 1566 1567 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1568 1569 iavf_map_rings_to_vectors(adapter); 1570 err: 1571 return err; 1572 } 1573 1574 /** 1575 * iavf_process_aq_command - process aq_required flags 1576 * and sends aq command 1577 * @adapter: pointer to iavf adapter structure 1578 * 1579 * Returns 0 on success 1580 * Returns error code if no command was sent 1581 * or error code if the command failed. 1582 **/ 1583 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1584 { 1585 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1586 return iavf_send_vf_config_msg(adapter); 1587 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1588 iavf_disable_queues(adapter); 1589 return 0; 1590 } 1591 1592 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1593 iavf_map_queues(adapter); 1594 return 0; 1595 } 1596 1597 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1598 iavf_add_ether_addrs(adapter); 1599 return 0; 1600 } 1601 1602 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1603 iavf_add_vlans(adapter); 1604 return 0; 1605 } 1606 1607 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1608 iavf_del_ether_addrs(adapter); 1609 return 0; 1610 } 1611 1612 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1613 iavf_del_vlans(adapter); 1614 return 0; 1615 } 1616 1617 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1618 iavf_enable_vlan_stripping(adapter); 1619 return 0; 1620 } 1621 1622 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1623 iavf_disable_vlan_stripping(adapter); 1624 return 0; 1625 } 1626 1627 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1628 iavf_configure_queues(adapter); 1629 return 0; 1630 } 1631 1632 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1633 iavf_enable_queues(adapter); 1634 return 0; 1635 } 1636 1637 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1638 /* This message goes straight to the firmware, not the 1639 * PF, so we don't have to set current_op as we will 1640 * not get a response through the ARQ. 
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}
	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
		iavf_request_stats(adapter);
		return 0;
	}

	return -EAGAIN;
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure the state is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
 * on failure the state is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		iavf_change_state(adapter, __IAVF_STARTUP);
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes driver initialization procedure.
 * On success the state is changed to __IAVF_DOWN;
 * on failure the state is changed to __IAVF_INIT_FAILED.
 **/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_process_config(adapter);
	if (err)
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	iavf_change_state(adapter, __IAVF_DOWN);
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
wake_up(&adapter->down_waitqueue); 1946 1947 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1948 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1949 if (!adapter->rss_key || !adapter->rss_lut) { 1950 err = -ENOMEM; 1951 goto err_mem; 1952 } 1953 if (RSS_AQ(adapter)) 1954 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1955 else 1956 iavf_init_rss(adapter); 1957 1958 return; 1959 err_mem: 1960 iavf_free_rss(adapter); 1961 err_register: 1962 iavf_free_misc_irq(adapter); 1963 err_sw_init: 1964 iavf_reset_interrupt_capability(adapter); 1965 err_alloc: 1966 kfree(adapter->vf_res); 1967 adapter->vf_res = NULL; 1968 err: 1969 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1970 } 1971 1972 /** 1973 * iavf_watchdog_task - Periodic call-back task 1974 * @work: pointer to work_struct 1975 **/ 1976 static void iavf_watchdog_task(struct work_struct *work) 1977 { 1978 struct iavf_adapter *adapter = container_of(work, 1979 struct iavf_adapter, 1980 watchdog_task.work); 1981 struct iavf_hw *hw = &adapter->hw; 1982 u32 reg_val; 1983 1984 if (!mutex_trylock(&adapter->crit_lock)) 1985 goto restart_watchdog; 1986 1987 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1988 iavf_change_state(adapter, __IAVF_COMM_FAILED); 1989 1990 if (adapter->flags & IAVF_FLAG_RESET_NEEDED && 1991 adapter->state != __IAVF_RESETTING) { 1992 iavf_change_state(adapter, __IAVF_RESETTING); 1993 adapter->aq_required = 0; 1994 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1995 } 1996 1997 switch (adapter->state) { 1998 case __IAVF_STARTUP: 1999 iavf_startup(adapter); 2000 mutex_unlock(&adapter->crit_lock); 2001 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2002 msecs_to_jiffies(30)); 2003 return; 2004 case __IAVF_INIT_VERSION_CHECK: 2005 iavf_init_version_check(adapter); 2006 mutex_unlock(&adapter->crit_lock); 2007 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2008 msecs_to_jiffies(30)); 2009 return; 2010 case __IAVF_INIT_GET_RESOURCES: 2011 iavf_init_get_resources(adapter); 2012 mutex_unlock(&adapter->crit_lock); 2013 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2014 msecs_to_jiffies(1)); 2015 return; 2016 case __IAVF_INIT_FAILED: 2017 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 2018 dev_err(&adapter->pdev->dev, 2019 "Failed to communicate with PF; waiting before retry\n"); 2020 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2021 iavf_shutdown_adminq(hw); 2022 mutex_unlock(&adapter->crit_lock); 2023 queue_delayed_work(iavf_wq, 2024 &adapter->watchdog_task, (5 * HZ)); 2025 return; 2026 } 2027 /* Try again from failed step*/ 2028 iavf_change_state(adapter, adapter->last_state); 2029 mutex_unlock(&adapter->crit_lock); 2030 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 2031 return; 2032 case __IAVF_COMM_FAILED: 2033 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2034 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2035 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2036 reg_val == VIRTCHNL_VFR_COMPLETED) { 2037 /* A chance for redemption! */ 2038 dev_err(&adapter->pdev->dev, 2039 "Hardware came out of reset. Attempting reinit.\n"); 2040 /* When init task contacts the PF and 2041 * gets everything set up again, it'll restart the 2042 * watchdog for us. Down, boy. Sit. Stay. Woof. 
2043 */ 2044 iavf_change_state(adapter, __IAVF_STARTUP); 2045 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2046 } 2047 adapter->aq_required = 0; 2048 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2049 mutex_unlock(&adapter->crit_lock); 2050 queue_delayed_work(iavf_wq, 2051 &adapter->watchdog_task, 2052 msecs_to_jiffies(10)); 2053 return; 2054 case __IAVF_RESETTING: 2055 mutex_unlock(&adapter->crit_lock); 2056 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2057 return; 2058 case __IAVF_DOWN: 2059 case __IAVF_DOWN_PENDING: 2060 case __IAVF_TESTING: 2061 case __IAVF_RUNNING: 2062 if (adapter->current_op) { 2063 if (!iavf_asq_done(hw)) { 2064 dev_dbg(&adapter->pdev->dev, 2065 "Admin queue timeout\n"); 2066 iavf_send_api_ver(adapter); 2067 } 2068 } else { 2069 /* An error will be returned if no commands were 2070 * processed; use this opportunity to update stats 2071 */ 2072 if (iavf_process_aq_command(adapter) && 2073 adapter->state == __IAVF_RUNNING) 2074 iavf_request_stats(adapter); 2075 } 2076 if (adapter->state == __IAVF_RUNNING) 2077 iavf_detect_recover_hung(&adapter->vsi); 2078 break; 2079 case __IAVF_REMOVE: 2080 default: 2081 mutex_unlock(&adapter->crit_lock); 2082 return; 2083 } 2084 2085 /* check for hw reset */ 2086 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2087 if (!reg_val) { 2088 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2089 adapter->aq_required = 0; 2090 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2091 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2092 queue_work(iavf_wq, &adapter->reset_task); 2093 mutex_unlock(&adapter->crit_lock); 2094 queue_delayed_work(iavf_wq, 2095 &adapter->watchdog_task, HZ * 2); 2096 return; 2097 } 2098 2099 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2100 mutex_unlock(&adapter->crit_lock); 2101 restart_watchdog: 2102 queue_work(iavf_wq, &adapter->adminq_task); 2103 if (adapter->aq_required) 2104 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2105 msecs_to_jiffies(20)); 2106 else 2107 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2108 } 2109 2110 static void iavf_disable_vf(struct iavf_adapter *adapter) 2111 { 2112 struct iavf_mac_filter *f, *ftmp; 2113 struct iavf_vlan_filter *fv, *fvtmp; 2114 struct iavf_cloud_filter *cf, *cftmp; 2115 2116 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2117 2118 /* We don't use netif_running() because it may be true prior to 2119 * ndo_open() returning, so we can't assume it means all our open 2120 * tasks have finished, since we're not holding the rtnl_lock here. 
2121 */ 2122 if (adapter->state == __IAVF_RUNNING) { 2123 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2124 netif_carrier_off(adapter->netdev); 2125 netif_tx_disable(adapter->netdev); 2126 adapter->link_up = false; 2127 iavf_napi_disable_all(adapter); 2128 iavf_irq_disable(adapter); 2129 iavf_free_traffic_irqs(adapter); 2130 iavf_free_all_tx_resources(adapter); 2131 iavf_free_all_rx_resources(adapter); 2132 } 2133 2134 spin_lock_bh(&adapter->mac_vlan_list_lock); 2135 2136 /* Delete all of the filters */ 2137 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2138 list_del(&f->list); 2139 kfree(f); 2140 } 2141 2142 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2143 list_del(&fv->list); 2144 kfree(fv); 2145 } 2146 2147 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2148 2149 spin_lock_bh(&adapter->cloud_filter_list_lock); 2150 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2151 list_del(&cf->list); 2152 kfree(cf); 2153 adapter->num_cloud_filters--; 2154 } 2155 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2156 2157 iavf_free_misc_irq(adapter); 2158 iavf_reset_interrupt_capability(adapter); 2159 iavf_free_q_vectors(adapter); 2160 iavf_free_queues(adapter); 2161 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2162 iavf_shutdown_adminq(&adapter->hw); 2163 adapter->netdev->flags &= ~IFF_UP; 2164 mutex_unlock(&adapter->crit_lock); 2165 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2166 iavf_change_state(adapter, __IAVF_DOWN); 2167 wake_up(&adapter->down_waitqueue); 2168 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2169 } 2170 2171 /** 2172 * iavf_reset_task - Call-back task to handle hardware reset 2173 * @work: pointer to work_struct 2174 * 2175 * During reset we need to shut down and reinitialize the admin queue 2176 * before we can use it to communicate with the PF again. We also clear 2177 * and reinit the rings because that context is lost as well. 2178 **/ 2179 static void iavf_reset_task(struct work_struct *work) 2180 { 2181 struct iavf_adapter *adapter = container_of(work, 2182 struct iavf_adapter, 2183 reset_task); 2184 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2185 struct net_device *netdev = adapter->netdev; 2186 struct iavf_hw *hw = &adapter->hw; 2187 struct iavf_mac_filter *f, *ftmp; 2188 struct iavf_cloud_filter *cf; 2189 u32 reg_val; 2190 int i = 0, err; 2191 bool running; 2192 2193 /* When device is being removed it doesn't make sense to run the reset 2194 * task, just return in such a case. 2195 */ 2196 if (mutex_is_locked(&adapter->remove_lock)) 2197 return; 2198 2199 if (iavf_lock_timeout(&adapter->crit_lock, 200)) { 2200 schedule_work(&adapter->reset_task); 2201 return; 2202 } 2203 while (!mutex_trylock(&adapter->client_lock)) 2204 usleep_range(500, 1000); 2205 if (CLIENT_ENABLED(adapter)) { 2206 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2207 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2208 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2209 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2210 cancel_delayed_work_sync(&adapter->client_task); 2211 iavf_notify_client_close(&adapter->vsi, true); 2212 } 2213 iavf_misc_irq_disable(adapter); 2214 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2215 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2216 /* Restart the AQ here. If we have been reset but didn't 2217 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2218 */ 2219 iavf_shutdown_adminq(hw); 2220 iavf_init_adminq(hw); 2221 iavf_request_reset(adapter); 2222 } 2223 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2224 2225 /* poll until we see the reset actually happen */ 2226 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2227 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2228 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2229 if (!reg_val) 2230 break; 2231 usleep_range(5000, 10000); 2232 } 2233 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2234 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2235 goto continue_reset; /* act like the reset happened */ 2236 } 2237 2238 /* wait until the reset is complete and the PF is responding to us */ 2239 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2240 /* sleep first to make sure a minimum wait time is met */ 2241 msleep(IAVF_RESET_WAIT_MS); 2242 2243 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2244 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2245 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2246 break; 2247 } 2248 2249 pci_set_master(adapter->pdev); 2250 pci_restore_msi_state(adapter->pdev); 2251 2252 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2253 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2254 reg_val); 2255 iavf_disable_vf(adapter); 2256 mutex_unlock(&adapter->client_lock); 2257 return; /* Do not attempt to reinit. It's dead, Jim. */ 2258 } 2259 2260 continue_reset: 2261 /* We don't use netif_running() because it may be true prior to 2262 * ndo_open() returning, so we can't assume it means all our open 2263 * tasks have finished, since we're not holding the rtnl_lock here. 2264 */ 2265 running = ((adapter->state == __IAVF_RUNNING) || 2266 (adapter->state == __IAVF_RESETTING)); 2267 2268 if (running) { 2269 netdev->flags &= ~IFF_UP; 2270 netif_carrier_off(netdev); 2271 netif_tx_stop_all_queues(netdev); 2272 adapter->link_up = false; 2273 iavf_napi_disable_all(adapter); 2274 } 2275 iavf_irq_disable(adapter); 2276 2277 iavf_change_state(adapter, __IAVF_RESETTING); 2278 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2279 2280 /* free the Tx/Rx rings and descriptors, might be better to just 2281 * re-use them sometime in the future 2282 */ 2283 iavf_free_all_rx_resources(adapter); 2284 iavf_free_all_tx_resources(adapter); 2285 2286 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2287 /* kill and reinit the admin queue */ 2288 iavf_shutdown_adminq(hw); 2289 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2290 err = iavf_init_adminq(hw); 2291 if (err) 2292 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2293 err); 2294 adapter->aq_required = 0; 2295 2296 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2297 err = iavf_reinit_interrupt_scheme(adapter); 2298 if (err) 2299 goto reset_err; 2300 } 2301 2302 if (RSS_AQ(adapter)) { 2303 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2304 } else { 2305 err = iavf_init_rss(adapter); 2306 if (err) 2307 goto reset_err; 2308 } 2309 2310 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2311 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2312 2313 spin_lock_bh(&adapter->mac_vlan_list_lock); 2314 2315 /* Delete filter for the current MAC address, it could have 2316 * been changed by the PF via administratively set MAC. 2317 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2318 */ 2319 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2320 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2321 list_del(&f->list); 2322 kfree(f); 2323 } 2324 } 2325 /* re-add all MAC filters */ 2326 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2327 f->add = true; 2328 } 2329 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2330 2331 /* check if TCs are running and re-add all cloud filters */ 2332 spin_lock_bh(&adapter->cloud_filter_list_lock); 2333 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2334 adapter->num_tc) { 2335 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2336 cf->add = true; 2337 } 2338 } 2339 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2340 2341 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2342 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2343 iavf_misc_irq_enable(adapter); 2344 2345 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2346 2347 /* We were running when the reset started, so we need to restore some 2348 * state here. 2349 */ 2350 if (running) { 2351 /* allocate transmit descriptors */ 2352 err = iavf_setup_all_tx_resources(adapter); 2353 if (err) 2354 goto reset_err; 2355 2356 /* allocate receive descriptors */ 2357 err = iavf_setup_all_rx_resources(adapter); 2358 if (err) 2359 goto reset_err; 2360 2361 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2362 err = iavf_request_traffic_irqs(adapter, netdev->name); 2363 if (err) 2364 goto reset_err; 2365 2366 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2367 } 2368 2369 iavf_configure(adapter); 2370 2371 /* iavf_up_complete() will switch device back 2372 * to __IAVF_RUNNING 2373 */ 2374 iavf_up_complete(adapter); 2375 netdev->flags |= IFF_UP; 2376 iavf_irq_enable(adapter, true); 2377 } else { 2378 iavf_change_state(adapter, __IAVF_DOWN); 2379 wake_up(&adapter->down_waitqueue); 2380 } 2381 mutex_unlock(&adapter->client_lock); 2382 mutex_unlock(&adapter->crit_lock); 2383 2384 return; 2385 reset_err: 2386 mutex_unlock(&adapter->client_lock); 2387 mutex_unlock(&adapter->crit_lock); 2388 if (running) { 2389 iavf_change_state(adapter, __IAVF_RUNNING); 2390 netdev->flags |= IFF_UP; 2391 } 2392 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2393 iavf_close(netdev); 2394 } 2395 2396 /** 2397 * iavf_adminq_task - worker thread to clean the admin queue 2398 * @work: pointer to work_struct containing our data 2399 **/ 2400 static void iavf_adminq_task(struct work_struct *work) 2401 { 2402 struct iavf_adapter *adapter = 2403 container_of(work, struct iavf_adapter, adminq_task); 2404 struct iavf_hw *hw = &adapter->hw; 2405 struct iavf_arq_event_info event; 2406 enum virtchnl_ops v_op; 2407 enum iavf_status ret, v_ret; 2408 u32 val, oldval; 2409 u16 pending; 2410 2411 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2412 goto out; 2413 2414 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2415 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2416 if (!event.msg_buf) 2417 goto out; 2418 2419 if (iavf_lock_timeout(&adapter->crit_lock, 200)) 2420 goto freedom; 2421 do { 2422 ret = iavf_clean_arq_element(hw, &event, &pending); 2423 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2424 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2425 2426 if (ret || !v_op) 2427 break; /* No event to process or error cleaning ARQ */ 2428 2429 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2430 event.msg_len); 2431 if (pending != 0) 2432 memset(event.msg_buf, 0, 
IAVF_MAX_AQ_BUF_SIZE); 2433 } while (pending); 2434 mutex_unlock(&adapter->crit_lock); 2435 2436 if ((adapter->flags & 2437 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2438 adapter->state == __IAVF_RESETTING) 2439 goto freedom; 2440 2441 /* check for error indications */ 2442 val = rd32(hw, hw->aq.arq.len); 2443 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ 2444 goto freedom; 2445 oldval = val; 2446 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2447 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2448 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2449 } 2450 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2451 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2452 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2453 } 2454 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2455 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2456 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2457 } 2458 if (oldval != val) 2459 wr32(hw, hw->aq.arq.len, val); 2460 2461 val = rd32(hw, hw->aq.asq.len); 2462 oldval = val; 2463 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2464 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2465 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2466 } 2467 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2468 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2469 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2470 } 2471 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2472 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2473 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2474 } 2475 if (oldval != val) 2476 wr32(hw, hw->aq.asq.len, val); 2477 2478 freedom: 2479 kfree(event.msg_buf); 2480 out: 2481 /* re-enable Admin queue interrupt cause */ 2482 iavf_misc_irq_enable(adapter); 2483 } 2484 2485 /** 2486 * iavf_client_task - worker thread to perform client work 2487 * @work: pointer to work_struct containing our data 2488 * 2489 * This task handles client interactions. Because client calls can be 2490 * reentrant, we can't handle them in the watchdog. 2491 **/ 2492 static void iavf_client_task(struct work_struct *work) 2493 { 2494 struct iavf_adapter *adapter = 2495 container_of(work, struct iavf_adapter, client_task.work); 2496 2497 /* If we can't get the client bit, just give up. We'll be rescheduled 2498 * later. 
2499 */ 2500 2501 if (!mutex_trylock(&adapter->client_lock)) 2502 return; 2503 2504 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2505 iavf_client_subtask(adapter); 2506 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2507 goto out; 2508 } 2509 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2510 iavf_notify_client_l2_params(&adapter->vsi); 2511 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2512 goto out; 2513 } 2514 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2515 iavf_notify_client_close(&adapter->vsi, false); 2516 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2517 goto out; 2518 } 2519 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2520 iavf_notify_client_open(&adapter->vsi); 2521 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2522 } 2523 out: 2524 mutex_unlock(&adapter->client_lock); 2525 } 2526 2527 /** 2528 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2529 * @adapter: board private structure 2530 * 2531 * Free all transmit software resources 2532 **/ 2533 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2534 { 2535 int i; 2536 2537 if (!adapter->tx_rings) 2538 return; 2539 2540 for (i = 0; i < adapter->num_active_queues; i++) 2541 if (adapter->tx_rings[i].desc) 2542 iavf_free_tx_resources(&adapter->tx_rings[i]); 2543 } 2544 2545 /** 2546 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2547 * @adapter: board private structure 2548 * 2549 * If this function returns with an error, then it's possible one or 2550 * more of the rings is populated (while the rest are not). It is the 2551 * callers duty to clean those orphaned rings. 2552 * 2553 * Return 0 on success, negative on failure 2554 **/ 2555 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2556 { 2557 int i, err = 0; 2558 2559 for (i = 0; i < adapter->num_active_queues; i++) { 2560 adapter->tx_rings[i].count = adapter->tx_desc_count; 2561 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2562 if (!err) 2563 continue; 2564 dev_err(&adapter->pdev->dev, 2565 "Allocation for Tx Queue %u failed\n", i); 2566 break; 2567 } 2568 2569 return err; 2570 } 2571 2572 /** 2573 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2574 * @adapter: board private structure 2575 * 2576 * If this function returns with an error, then it's possible one or 2577 * more of the rings is populated (while the rest are not). It is the 2578 * callers duty to clean those orphaned rings. 
2579 * 2580 * Return 0 on success, negative on failure 2581 **/ 2582 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2583 { 2584 int i, err = 0; 2585 2586 for (i = 0; i < adapter->num_active_queues; i++) { 2587 adapter->rx_rings[i].count = adapter->rx_desc_count; 2588 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2589 if (!err) 2590 continue; 2591 dev_err(&adapter->pdev->dev, 2592 "Allocation for Rx Queue %u failed\n", i); 2593 break; 2594 } 2595 return err; 2596 } 2597 2598 /** 2599 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2600 * @adapter: board private structure 2601 * 2602 * Free all receive software resources 2603 **/ 2604 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2605 { 2606 int i; 2607 2608 if (!adapter->rx_rings) 2609 return; 2610 2611 for (i = 0; i < adapter->num_active_queues; i++) 2612 if (adapter->rx_rings[i].desc) 2613 iavf_free_rx_resources(&adapter->rx_rings[i]); 2614 } 2615 2616 /** 2617 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2618 * @adapter: board private structure 2619 * @max_tx_rate: max Tx bw for a tc 2620 **/ 2621 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2622 u64 max_tx_rate) 2623 { 2624 int speed = 0, ret = 0; 2625 2626 if (ADV_LINK_SUPPORT(adapter)) { 2627 if (adapter->link_speed_mbps < U32_MAX) { 2628 speed = adapter->link_speed_mbps; 2629 goto validate_bw; 2630 } else { 2631 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2632 return -EINVAL; 2633 } 2634 } 2635 2636 switch (adapter->link_speed) { 2637 case VIRTCHNL_LINK_SPEED_40GB: 2638 speed = SPEED_40000; 2639 break; 2640 case VIRTCHNL_LINK_SPEED_25GB: 2641 speed = SPEED_25000; 2642 break; 2643 case VIRTCHNL_LINK_SPEED_20GB: 2644 speed = SPEED_20000; 2645 break; 2646 case VIRTCHNL_LINK_SPEED_10GB: 2647 speed = SPEED_10000; 2648 break; 2649 case VIRTCHNL_LINK_SPEED_5GB: 2650 speed = SPEED_5000; 2651 break; 2652 case VIRTCHNL_LINK_SPEED_2_5GB: 2653 speed = SPEED_2500; 2654 break; 2655 case VIRTCHNL_LINK_SPEED_1GB: 2656 speed = SPEED_1000; 2657 break; 2658 case VIRTCHNL_LINK_SPEED_100MB: 2659 speed = SPEED_100; 2660 break; 2661 default: 2662 break; 2663 } 2664 2665 validate_bw: 2666 if (max_tx_rate > speed) { 2667 dev_err(&adapter->pdev->dev, 2668 "Invalid tx rate specified\n"); 2669 ret = -EINVAL; 2670 } 2671 2672 return ret; 2673 } 2674 2675 /** 2676 * iavf_validate_ch_config - validate queue mapping info 2677 * @adapter: board private structure 2678 * @mqprio_qopt: queue parameters 2679 * 2680 * This function validates if the config provided by the user to 2681 * configure queue channels is valid or not. Returns 0 on a valid 2682 * config. 
2683 **/ 2684 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2685 struct tc_mqprio_qopt_offload *mqprio_qopt) 2686 { 2687 u64 total_max_rate = 0; 2688 int i, num_qps = 0; 2689 u64 tx_rate = 0; 2690 int ret = 0; 2691 2692 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2693 mqprio_qopt->qopt.num_tc < 1) 2694 return -EINVAL; 2695 2696 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2697 if (!mqprio_qopt->qopt.count[i] || 2698 mqprio_qopt->qopt.offset[i] != num_qps) 2699 return -EINVAL; 2700 if (mqprio_qopt->min_rate[i]) { 2701 dev_err(&adapter->pdev->dev, 2702 "Invalid min tx rate (greater than 0) specified\n"); 2703 return -EINVAL; 2704 } 2705 /*convert to Mbps */ 2706 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2707 IAVF_MBPS_DIVISOR); 2708 total_max_rate += tx_rate; 2709 num_qps += mqprio_qopt->qopt.count[i]; 2710 } 2711 if (num_qps > adapter->num_active_queues) { 2712 dev_err(&adapter->pdev->dev, 2713 "Cannot support requested number of queues\n"); 2714 return -EINVAL; 2715 } 2716 2717 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2718 return ret; 2719 } 2720 2721 /** 2722 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2723 * @adapter: board private structure 2724 **/ 2725 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2726 { 2727 struct iavf_cloud_filter *cf, *cftmp; 2728 2729 spin_lock_bh(&adapter->cloud_filter_list_lock); 2730 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2731 list) { 2732 list_del(&cf->list); 2733 kfree(cf); 2734 adapter->num_cloud_filters--; 2735 } 2736 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2737 } 2738 2739 /** 2740 * __iavf_setup_tc - configure multiple traffic classes 2741 * @netdev: network interface device structure 2742 * @type_data: tc offload data 2743 * 2744 * This function processes the config information provided by the 2745 * user to configure traffic classes/queue channels and packages the 2746 * information to request the PF to setup traffic classes. 2747 * 2748 * Returns 0 on success. 
2749 **/ 2750 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2751 { 2752 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2753 struct iavf_adapter *adapter = netdev_priv(netdev); 2754 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2755 u8 num_tc = 0, total_qps = 0; 2756 int ret = 0, netdev_tc = 0; 2757 u64 max_tx_rate; 2758 u16 mode; 2759 int i; 2760 2761 num_tc = mqprio_qopt->qopt.num_tc; 2762 mode = mqprio_qopt->mode; 2763 2764 /* delete queue_channel */ 2765 if (!mqprio_qopt->qopt.hw) { 2766 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2767 /* reset the tc configuration */ 2768 netdev_reset_tc(netdev); 2769 adapter->num_tc = 0; 2770 netif_tx_stop_all_queues(netdev); 2771 netif_tx_disable(netdev); 2772 iavf_del_all_cloud_filters(adapter); 2773 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2774 goto exit; 2775 } else { 2776 return -EINVAL; 2777 } 2778 } 2779 2780 /* add queue channel */ 2781 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2782 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2783 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2784 return -EOPNOTSUPP; 2785 } 2786 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2787 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2788 return -EINVAL; 2789 } 2790 2791 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2792 if (ret) 2793 return ret; 2794 /* Return if same TC config is requested */ 2795 if (adapter->num_tc == num_tc) 2796 return 0; 2797 adapter->num_tc = num_tc; 2798 2799 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2800 if (i < num_tc) { 2801 adapter->ch_config.ch_info[i].count = 2802 mqprio_qopt->qopt.count[i]; 2803 adapter->ch_config.ch_info[i].offset = 2804 mqprio_qopt->qopt.offset[i]; 2805 total_qps += mqprio_qopt->qopt.count[i]; 2806 max_tx_rate = mqprio_qopt->max_rate[i]; 2807 /* convert to Mbps */ 2808 max_tx_rate = div_u64(max_tx_rate, 2809 IAVF_MBPS_DIVISOR); 2810 adapter->ch_config.ch_info[i].max_tx_rate = 2811 max_tx_rate; 2812 } else { 2813 adapter->ch_config.ch_info[i].count = 1; 2814 adapter->ch_config.ch_info[i].offset = 0; 2815 } 2816 } 2817 adapter->ch_config.total_qps = total_qps; 2818 netif_tx_stop_all_queues(netdev); 2819 netif_tx_disable(netdev); 2820 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2821 netdev_reset_tc(netdev); 2822 /* Report the tc mapping up the stack */ 2823 netdev_set_num_tc(adapter->netdev, num_tc); 2824 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2825 u16 qcount = mqprio_qopt->qopt.count[i]; 2826 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2827 2828 if (i < num_tc) 2829 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2830 qoffset); 2831 } 2832 } 2833 exit: 2834 return ret; 2835 } 2836 2837 /** 2838 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2839 * @adapter: board private structure 2840 * @f: pointer to struct flow_cls_offload 2841 * @filter: pointer to cloud filter structure 2842 */ 2843 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2844 struct flow_cls_offload *f, 2845 struct iavf_cloud_filter *filter) 2846 { 2847 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2848 struct flow_dissector *dissector = rule->match.dissector; 2849 u16 n_proto_mask = 0; 2850 u16 n_proto_key = 0; 2851 u8 field_flags = 0; 2852 u16 addr_type = 0; 2853 u16 n_proto = 0; 2854 int i = 0; 2855 struct virtchnl_filter *vf = &filter->f; 2856 2857 if (dissector->used_keys & 2858 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2859 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2860 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2861 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2862 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2863 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2864 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2865 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2866 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2867 dissector->used_keys); 2868 return -EOPNOTSUPP; 2869 } 2870 2871 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2872 struct flow_match_enc_keyid match; 2873 2874 flow_rule_match_enc_keyid(rule, &match); 2875 if (match.mask->keyid != 0) 2876 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2877 } 2878 2879 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2880 struct flow_match_basic match; 2881 2882 flow_rule_match_basic(rule, &match); 2883 n_proto_key = ntohs(match.key->n_proto); 2884 n_proto_mask = ntohs(match.mask->n_proto); 2885 2886 if (n_proto_key == ETH_P_ALL) { 2887 n_proto_key = 0; 2888 n_proto_mask = 0; 2889 } 2890 n_proto = n_proto_key & n_proto_mask; 2891 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2892 return -EINVAL; 2893 if (n_proto == ETH_P_IPV6) { 2894 /* specify flow type as TCP IPv6 */ 2895 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2896 } 2897 2898 if (match.key->ip_proto != IPPROTO_TCP) { 2899 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2900 return -EINVAL; 2901 } 2902 } 2903 2904 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2905 struct flow_match_eth_addrs match; 2906 2907 flow_rule_match_eth_addrs(rule, &match); 2908 2909 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2910 if (!is_zero_ether_addr(match.mask->dst)) { 2911 if (is_broadcast_ether_addr(match.mask->dst)) { 2912 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2913 } else { 2914 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2915 match.mask->dst); 2916 return IAVF_ERR_CONFIG; 2917 } 2918 } 2919 2920 if (!is_zero_ether_addr(match.mask->src)) { 2921 if (is_broadcast_ether_addr(match.mask->src)) { 2922 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2923 } else { 2924 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2925 match.mask->src); 2926 return IAVF_ERR_CONFIG; 2927 } 2928 } 2929 2930 if (!is_zero_ether_addr(match.key->dst)) 2931 if (is_valid_ether_addr(match.key->dst) || 2932 is_multicast_ether_addr(match.key->dst)) { 2933 /* set the mask if a valid dst_mac address */ 2934 for (i = 0; i < ETH_ALEN; i++) 2935 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2936 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2937 match.key->dst); 2938 } 2939 2940 if (!is_zero_ether_addr(match.key->src)) 2941 if (is_valid_ether_addr(match.key->src) || 2942 is_multicast_ether_addr(match.key->src)) { 2943 /* set the mask if a valid dst_mac address */ 2944 for (i = 0; i < ETH_ALEN; i++) 2945 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2946 ether_addr_copy(vf->data.tcp_spec.src_mac, 2947 match.key->src); 2948 } 2949 } 2950 2951 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2952 struct flow_match_vlan match; 2953 2954 flow_rule_match_vlan(rule, &match); 2955 if (match.mask->vlan_id) { 2956 if (match.mask->vlan_id == VLAN_VID_MASK) { 2957 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2958 } else { 2959 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2960 match.mask->vlan_id); 2961 return IAVF_ERR_CONFIG; 2962 } 2963 } 2964 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2965 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2966 } 2967 2968 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2969 struct flow_match_control match; 2970 2971 
flow_rule_match_control(rule, &match); 2972 addr_type = match.key->addr_type; 2973 } 2974 2975 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2976 struct flow_match_ipv4_addrs match; 2977 2978 flow_rule_match_ipv4_addrs(rule, &match); 2979 if (match.mask->dst) { 2980 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2981 field_flags |= IAVF_CLOUD_FIELD_IIP; 2982 } else { 2983 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2984 be32_to_cpu(match.mask->dst)); 2985 return IAVF_ERR_CONFIG; 2986 } 2987 } 2988 2989 if (match.mask->src) { 2990 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2991 field_flags |= IAVF_CLOUD_FIELD_IIP; 2992 } else { 2993 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2994 be32_to_cpu(match.mask->src)); 2995 return IAVF_ERR_CONFIG; 2996 } 2997 } 2998 2999 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 3000 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 3001 return IAVF_ERR_CONFIG; 3002 } 3003 if (match.key->dst) { 3004 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 3005 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 3006 } 3007 if (match.key->src) { 3008 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 3009 vf->data.tcp_spec.src_ip[0] = match.key->src; 3010 } 3011 } 3012 3013 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 3014 struct flow_match_ipv6_addrs match; 3015 3016 flow_rule_match_ipv6_addrs(rule, &match); 3017 3018 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 3019 if (ipv6_addr_any(&match.mask->dst)) { 3020 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 3021 IPV6_ADDR_ANY); 3022 return IAVF_ERR_CONFIG; 3023 } 3024 3025 /* src and dest IPv6 address should not be LOOPBACK 3026 * (0:0:0:0:0:0:0:1) which can be represented as ::1 3027 */ 3028 if (ipv6_addr_loopback(&match.key->dst) || 3029 ipv6_addr_loopback(&match.key->src)) { 3030 dev_err(&adapter->pdev->dev, 3031 "ipv6 addr should not be loopback\n"); 3032 return IAVF_ERR_CONFIG; 3033 } 3034 if (!ipv6_addr_any(&match.mask->dst) || 3035 !ipv6_addr_any(&match.mask->src)) 3036 field_flags |= IAVF_CLOUD_FIELD_IIP; 3037 3038 for (i = 0; i < 4; i++) 3039 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 3040 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 3041 sizeof(vf->data.tcp_spec.dst_ip)); 3042 for (i = 0; i < 4; i++) 3043 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 3044 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 3045 sizeof(vf->data.tcp_spec.src_ip)); 3046 } 3047 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 3048 struct flow_match_ports match; 3049 3050 flow_rule_match_ports(rule, &match); 3051 if (match.mask->src) { 3052 if (match.mask->src == cpu_to_be16(0xffff)) { 3053 field_flags |= IAVF_CLOUD_FIELD_IIP; 3054 } else { 3055 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 3056 be16_to_cpu(match.mask->src)); 3057 return IAVF_ERR_CONFIG; 3058 } 3059 } 3060 3061 if (match.mask->dst) { 3062 if (match.mask->dst == cpu_to_be16(0xffff)) { 3063 field_flags |= IAVF_CLOUD_FIELD_IIP; 3064 } else { 3065 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 3066 be16_to_cpu(match.mask->dst)); 3067 return IAVF_ERR_CONFIG; 3068 } 3069 } 3070 if (match.key->dst) { 3071 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 3072 vf->data.tcp_spec.dst_port = match.key->dst; 3073 } 3074 3075 if (match.key->src) { 3076 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 3077 vf->data.tcp_spec.src_port = match.key->src; 3078 } 3079 } 3080 vf->field_flags = field_flags; 3081 3082
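	/* Every supported dissector key has now been copied into the
	 * virtchnl filter's data/mask pair; field_flags records which
	 * fields were actually specified by the flower rule.
	 */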
return 0; 3083 } 3084 3085 /** 3086 * iavf_handle_tclass - Forward to a traffic class on the device 3087 * @adapter: board private structure 3088 * @tc: traffic class index on the device 3089 * @filter: pointer to cloud filter structure 3090 */ 3091 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3092 struct iavf_cloud_filter *filter) 3093 { 3094 if (tc == 0) 3095 return 0; 3096 if (tc < adapter->num_tc) { 3097 if (!filter->f.data.tcp_spec.dst_port) { 3098 dev_err(&adapter->pdev->dev, 3099 "Specify destination port to redirect to traffic class other than TC0\n"); 3100 return -EINVAL; 3101 } 3102 } 3103 /* redirect to a traffic class on the same device */ 3104 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3105 filter->f.action_meta = tc; 3106 return 0; 3107 } 3108 3109 /** 3110 * iavf_configure_clsflower - Add tc flower filters 3111 * @adapter: board private structure 3112 * @cls_flower: Pointer to struct flow_cls_offload 3113 */ 3114 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3115 struct flow_cls_offload *cls_flower) 3116 { 3117 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3118 struct iavf_cloud_filter *filter = NULL; 3119 int err = -EINVAL, count = 50; 3120 3121 if (tc < 0) { 3122 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3123 return -EINVAL; 3124 } 3125 3126 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3127 if (!filter) 3128 return -ENOMEM; 3129 3130 while (!mutex_trylock(&adapter->crit_lock)) { 3131 if (--count == 0) { 3132 kfree(filter); 3133 return err; 3134 } 3135 udelay(1); 3136 } 3137 3138 filter->cookie = cls_flower->cookie; 3139 3140 /* set the mask to all zeroes to begin with */ 3141 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3142 /* start out with flow type and eth type IPv4 to begin with */ 3143 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3144 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3145 if (err) 3146 goto err; 3147 3148 err = iavf_handle_tclass(adapter, tc, filter); 3149 if (err) 3150 goto err; 3151 3152 /* add filter to the list */ 3153 spin_lock_bh(&adapter->cloud_filter_list_lock); 3154 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3155 adapter->num_cloud_filters++; 3156 filter->add = true; 3157 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3158 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3159 err: 3160 if (err) 3161 kfree(filter); 3162 3163 mutex_unlock(&adapter->crit_lock); 3164 return err; 3165 } 3166 3167 /* iavf_find_cf - Find the cloud filter in the list 3168 * @adapter: Board private structure 3169 * @cookie: filter specific cookie 3170 * 3171 * Returns ptr to the filter object or NULL. Must be called while holding the 3172 * cloud_filter_list_lock. 
3173 */ 3174 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3175 unsigned long *cookie) 3176 { 3177 struct iavf_cloud_filter *filter = NULL; 3178 3179 if (!cookie) 3180 return NULL; 3181 3182 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3183 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3184 return filter; 3185 } 3186 return NULL; 3187 } 3188 3189 /** 3190 * iavf_delete_clsflower - Remove tc flower filters 3191 * @adapter: board private structure 3192 * @cls_flower: Pointer to struct flow_cls_offload 3193 */ 3194 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3195 struct flow_cls_offload *cls_flower) 3196 { 3197 struct iavf_cloud_filter *filter = NULL; 3198 int err = 0; 3199 3200 spin_lock_bh(&adapter->cloud_filter_list_lock); 3201 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3202 if (filter) { 3203 filter->del = true; 3204 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3205 } else { 3206 err = -EINVAL; 3207 } 3208 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3209 3210 return err; 3211 } 3212 3213 /** 3214 * iavf_setup_tc_cls_flower - flower classifier offloads 3215 * @adapter: board private structure 3216 * @cls_flower: pointer to flow_cls_offload struct with flow info 3217 */ 3218 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3219 struct flow_cls_offload *cls_flower) 3220 { 3221 switch (cls_flower->command) { 3222 case FLOW_CLS_REPLACE: 3223 return iavf_configure_clsflower(adapter, cls_flower); 3224 case FLOW_CLS_DESTROY: 3225 return iavf_delete_clsflower(adapter, cls_flower); 3226 case FLOW_CLS_STATS: 3227 return -EOPNOTSUPP; 3228 default: 3229 return -EOPNOTSUPP; 3230 } 3231 } 3232 3233 /** 3234 * iavf_setup_tc_block_cb - block callback for tc 3235 * @type: type of offload 3236 * @type_data: offload data 3237 * @cb_priv: 3238 * 3239 * This function is the block callback for traffic classes 3240 **/ 3241 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3242 void *cb_priv) 3243 { 3244 struct iavf_adapter *adapter = cb_priv; 3245 3246 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3247 return -EOPNOTSUPP; 3248 3249 switch (type) { 3250 case TC_SETUP_CLSFLOWER: 3251 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3252 default: 3253 return -EOPNOTSUPP; 3254 } 3255 } 3256 3257 static LIST_HEAD(iavf_block_cb_list); 3258 3259 /** 3260 * iavf_setup_tc - configure multiple traffic classes 3261 * @netdev: network interface device structure 3262 * @type: type of offload 3263 * @type_data: tc offload data 3264 * 3265 * This function is the callback to ndo_setup_tc in the 3266 * netdev_ops. 3267 * 3268 * Returns 0 on success 3269 **/ 3270 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3271 void *type_data) 3272 { 3273 struct iavf_adapter *adapter = netdev_priv(netdev); 3274 3275 switch (type) { 3276 case TC_SETUP_QDISC_MQPRIO: 3277 return __iavf_setup_tc(netdev, type_data); 3278 case TC_SETUP_BLOCK: 3279 return flow_block_cb_setup_simple(type_data, 3280 &iavf_block_cb_list, 3281 iavf_setup_tc_block_cb, 3282 adapter, adapter, true); 3283 default: 3284 return -EOPNOTSUPP; 3285 } 3286 } 3287 3288 /** 3289 * iavf_open - Called when a network interface is made active 3290 * @netdev: network interface device structure 3291 * 3292 * Returns 0 on success, negative value on failure 3293 * 3294 * The open entry point is called when a network interface is made 3295 * active by the system (IFF_UP). 
At this point all resources needed 3296 * for transmit and receive operations are allocated, the interrupt 3297 * handler is registered with the OS, the watchdog is started, 3298 * and the stack is notified that the interface is ready. 3299 **/ 3300 static int iavf_open(struct net_device *netdev) 3301 { 3302 struct iavf_adapter *adapter = netdev_priv(netdev); 3303 int err; 3304 3305 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3306 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3307 return -EIO; 3308 } 3309 3310 while (!mutex_trylock(&adapter->crit_lock)) 3311 usleep_range(500, 1000); 3312 3313 if (adapter->state != __IAVF_DOWN) { 3314 err = -EBUSY; 3315 goto err_unlock; 3316 } 3317 3318 if (adapter->state == __IAVF_RUNNING && 3319 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 3320 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 3321 err = 0; 3322 goto err_unlock; 3323 } 3324 3325 /* allocate transmit descriptors */ 3326 err = iavf_setup_all_tx_resources(adapter); 3327 if (err) 3328 goto err_setup_tx; 3329 3330 /* allocate receive descriptors */ 3331 err = iavf_setup_all_rx_resources(adapter); 3332 if (err) 3333 goto err_setup_rx; 3334 3335 /* clear any pending interrupts, may auto mask */ 3336 err = iavf_request_traffic_irqs(adapter, netdev->name); 3337 if (err) 3338 goto err_req_irq; 3339 3340 spin_lock_bh(&adapter->mac_vlan_list_lock); 3341 3342 iavf_add_filter(adapter, adapter->hw.mac.addr); 3343 3344 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3345 3346 /* Restore VLAN filters that were removed with IFF_DOWN */ 3347 iavf_restore_filters(adapter); 3348 3349 iavf_configure(adapter); 3350 3351 iavf_up_complete(adapter); 3352 3353 iavf_irq_enable(adapter, true); 3354 3355 mutex_unlock(&adapter->crit_lock); 3356 3357 return 0; 3358 3359 err_req_irq: 3360 iavf_down(adapter); 3361 iavf_free_traffic_irqs(adapter); 3362 err_setup_rx: 3363 iavf_free_all_rx_resources(adapter); 3364 err_setup_tx: 3365 iavf_free_all_tx_resources(adapter); 3366 err_unlock: 3367 mutex_unlock(&adapter->crit_lock); 3368 3369 return err; 3370 } 3371 3372 /** 3373 * iavf_close - Disables a network interface 3374 * @netdev: network interface device structure 3375 * 3376 * Returns 0, this is not allowed to fail 3377 * 3378 * The close entry point is called when an interface is de-activated 3379 * by the OS. The hardware is still under the drivers control, but 3380 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3381 * are freed, along with all transmit and receive resources. 3382 **/ 3383 static int iavf_close(struct net_device *netdev) 3384 { 3385 struct iavf_adapter *adapter = netdev_priv(netdev); 3386 int status; 3387 3388 if (adapter->state <= __IAVF_DOWN_PENDING) 3389 return 0; 3390 3391 while (!mutex_trylock(&adapter->crit_lock)) 3392 usleep_range(500, 1000); 3393 3394 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3395 if (CLIENT_ENABLED(adapter)) 3396 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3397 3398 iavf_down(adapter); 3399 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 3400 iavf_free_traffic_irqs(adapter); 3401 3402 mutex_unlock(&adapter->crit_lock); 3403 3404 /* We explicitly don't free resources here because the hardware is 3405 * still active and can DMA into memory. Resources are cleared in 3406 * iavf_virtchnl_completion() after we get confirmation from the PF 3407 * driver that the rings have been stopped. 3408 * 3409 * Also, we wait for state to transition to __IAVF_DOWN before 3410 * returning. 
State change occurs in iavf_virtchnl_completion() after 3411 * VF resources are released (which occurs after PF driver processes and 3412 * responds to admin queue commands). 3413 */ 3414 3415 status = wait_event_timeout(adapter->down_waitqueue, 3416 adapter->state == __IAVF_DOWN, 3417 msecs_to_jiffies(500)); 3418 if (!status) 3419 netdev_warn(netdev, "Device resources not yet released\n"); 3420 return 0; 3421 } 3422 3423 /** 3424 * iavf_change_mtu - Change the Maximum Transfer Unit 3425 * @netdev: network interface device structure 3426 * @new_mtu: new value for maximum frame size 3427 * 3428 * Returns 0 on success, negative on failure 3429 **/ 3430 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3431 { 3432 struct iavf_adapter *adapter = netdev_priv(netdev); 3433 3434 netdev->mtu = new_mtu; 3435 if (CLIENT_ENABLED(adapter)) { 3436 iavf_notify_client_l2_params(&adapter->vsi); 3437 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3438 } 3439 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3440 queue_work(iavf_wq, &adapter->reset_task); 3441 3442 return 0; 3443 } 3444 3445 /** 3446 * iavf_set_features - set the netdev feature flags 3447 * @netdev: ptr to the netdev being adjusted 3448 * @features: the feature set that the stack is suggesting 3449 * Note: expects to be called while under rtnl_lock() 3450 **/ 3451 static int iavf_set_features(struct net_device *netdev, 3452 netdev_features_t features) 3453 { 3454 struct iavf_adapter *adapter = netdev_priv(netdev); 3455 3456 /* Don't allow enabling VLAN features when adapter is not capable 3457 * of VLAN offload/filtering 3458 */ 3459 if (!VLAN_ALLOWED(adapter)) { 3460 netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 3461 NETIF_F_HW_VLAN_CTAG_TX | 3462 NETIF_F_HW_VLAN_CTAG_FILTER); 3463 if (features & (NETIF_F_HW_VLAN_CTAG_RX | 3464 NETIF_F_HW_VLAN_CTAG_TX | 3465 NETIF_F_HW_VLAN_CTAG_FILTER)) 3466 return -EINVAL; 3467 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3468 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3469 adapter->aq_required |= 3470 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3471 else 3472 adapter->aq_required |= 3473 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3474 } 3475 3476 return 0; 3477 } 3478 3479 /** 3480 * iavf_features_check - Validate encapsulated packet conforms to limits 3481 * @skb: skb buff 3482 * @dev: This physical port's netdev 3483 * @features: Offload features that the stack believes apply 3484 **/ 3485 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3486 struct net_device *dev, 3487 netdev_features_t features) 3488 { 3489 size_t len; 3490 3491 /* No point in doing any of this if neither checksum nor GSO are 3492 * being requested for this frame. We can rule out both by just 3493 * checking for CHECKSUM_PARTIAL 3494 */ 3495 if (skb->ip_summed != CHECKSUM_PARTIAL) 3496 return features; 3497 3498 /* We cannot support GSO if the MSS is going to be less than 3499 * 64 bytes. If it is then we need to drop support for GSO. 
3500 */ 3501 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3502 features &= ~NETIF_F_GSO_MASK; 3503 3504 /* MACLEN can support at most 63 words */ 3505 len = skb_network_header(skb) - skb->data; 3506 if (len & ~(63 * 2)) 3507 goto out_err; 3508 3509 /* IPLEN and EIPLEN can support at most 127 dwords */ 3510 len = skb_transport_header(skb) - skb_network_header(skb); 3511 if (len & ~(127 * 4)) 3512 goto out_err; 3513 3514 if (skb->encapsulation) { 3515 /* L4TUNLEN can support 127 words */ 3516 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3517 if (len & ~(127 * 2)) 3518 goto out_err; 3519 3520 /* IPLEN can support at most 127 dwords */ 3521 len = skb_inner_transport_header(skb) - 3522 skb_inner_network_header(skb); 3523 if (len & ~(127 * 4)) 3524 goto out_err; 3525 } 3526 3527 /* No need to validate L4LEN as TCP is the only protocol with a 3528 * a flexible value and we support all possible values supported 3529 * by TCP, which is at most 15 dwords 3530 */ 3531 3532 return features; 3533 out_err: 3534 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3535 } 3536 3537 /** 3538 * iavf_fix_features - fix up the netdev feature bits 3539 * @netdev: our net device 3540 * @features: desired feature bits 3541 * 3542 * Returns fixed-up features bits 3543 **/ 3544 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3545 netdev_features_t features) 3546 { 3547 struct iavf_adapter *adapter = netdev_priv(netdev); 3548 3549 if (adapter->vf_res && 3550 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3551 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3552 NETIF_F_HW_VLAN_CTAG_RX | 3553 NETIF_F_HW_VLAN_CTAG_FILTER); 3554 3555 return features; 3556 } 3557 3558 static const struct net_device_ops iavf_netdev_ops = { 3559 .ndo_open = iavf_open, 3560 .ndo_stop = iavf_close, 3561 .ndo_start_xmit = iavf_xmit_frame, 3562 .ndo_set_rx_mode = iavf_set_rx_mode, 3563 .ndo_validate_addr = eth_validate_addr, 3564 .ndo_set_mac_address = iavf_set_mac, 3565 .ndo_change_mtu = iavf_change_mtu, 3566 .ndo_tx_timeout = iavf_tx_timeout, 3567 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3568 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3569 .ndo_features_check = iavf_features_check, 3570 .ndo_fix_features = iavf_fix_features, 3571 .ndo_set_features = iavf_set_features, 3572 .ndo_setup_tc = iavf_setup_tc, 3573 }; 3574 3575 /** 3576 * iavf_check_reset_complete - check that VF reset is complete 3577 * @hw: pointer to hw struct 3578 * 3579 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3580 **/ 3581 static int iavf_check_reset_complete(struct iavf_hw *hw) 3582 { 3583 u32 rstat; 3584 int i; 3585 3586 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3587 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3588 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3589 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3590 (rstat == VIRTCHNL_VFR_COMPLETED)) 3591 return 0; 3592 usleep_range(10, 20); 3593 } 3594 return -EBUSY; 3595 } 3596 3597 /** 3598 * iavf_process_config - Process the config information we got from the PF 3599 * @adapter: board private structure 3600 * 3601 * Verify that we have a valid config struct, and set up our netdev features 3602 * and our VSI struct. 
3603 **/ 3604 int iavf_process_config(struct iavf_adapter *adapter) 3605 { 3606 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3607 int i, num_req_queues = adapter->num_req_queues; 3608 struct net_device *netdev = adapter->netdev; 3609 struct iavf_vsi *vsi = &adapter->vsi; 3610 netdev_features_t hw_enc_features; 3611 netdev_features_t hw_features; 3612 3613 /* got VF config message back from PF, now we can parse it */ 3614 for (i = 0; i < vfres->num_vsis; i++) { 3615 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3616 adapter->vsi_res = &vfres->vsi_res[i]; 3617 } 3618 if (!adapter->vsi_res) { 3619 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3620 return -ENODEV; 3621 } 3622 3623 if (num_req_queues && 3624 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3625 /* Problem. The PF gave us fewer queues than what we had 3626 * negotiated in our request. Need a reset to see if we can't 3627 * get back to a working state. 3628 */ 3629 dev_err(&adapter->pdev->dev, 3630 "Requested %d queues, but PF only gave us %d.\n", 3631 num_req_queues, 3632 adapter->vsi_res->num_queue_pairs); 3633 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3634 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3635 iavf_schedule_reset(adapter); 3636 return -ENODEV; 3637 } 3638 adapter->num_req_queues = 0; 3639 3640 hw_enc_features = NETIF_F_SG | 3641 NETIF_F_IP_CSUM | 3642 NETIF_F_IPV6_CSUM | 3643 NETIF_F_HIGHDMA | 3644 NETIF_F_SOFT_FEATURES | 3645 NETIF_F_TSO | 3646 NETIF_F_TSO_ECN | 3647 NETIF_F_TSO6 | 3648 NETIF_F_SCTP_CRC | 3649 NETIF_F_RXHASH | 3650 NETIF_F_RXCSUM | 3651 0; 3652 3653 /* advertise to stack only if offloads for encapsulated packets is 3654 * supported 3655 */ 3656 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3657 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3658 NETIF_F_GSO_GRE | 3659 NETIF_F_GSO_GRE_CSUM | 3660 NETIF_F_GSO_IPXIP4 | 3661 NETIF_F_GSO_IPXIP6 | 3662 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3663 NETIF_F_GSO_PARTIAL | 3664 0; 3665 3666 if (!(vfres->vf_cap_flags & 3667 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3668 netdev->gso_partial_features |= 3669 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3670 3671 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3672 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3673 netdev->hw_enc_features |= hw_enc_features; 3674 } 3675 /* record features VLANs can make use of */ 3676 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3677 3678 /* Write features and hw_features separately to avoid polluting 3679 * with, or dropping, features that are set when we registered. 3680 */ 3681 hw_features = hw_enc_features; 3682 3683 /* Enable VLAN features if supported */ 3684 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3685 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3686 NETIF_F_HW_VLAN_CTAG_RX); 3687 /* Enable cloud filter if ADQ is supported */ 3688 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3689 hw_features |= NETIF_F_HW_TC; 3690 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3691 hw_features |= NETIF_F_GSO_UDP_L4; 3692 3693 netdev->hw_features |= hw_features; 3694 3695 netdev->features |= hw_features; 3696 3697 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3698 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3699 3700 netdev->priv_flags |= IFF_UNICAST_FLT; 3701 3702 /* Do not turn on offloads when they are requested to be turned off. 3703 * TSO needs minimum 576 bytes to work correctly. 
3704 */ 3705 if (netdev->wanted_features) { 3706 if (!(netdev->wanted_features & NETIF_F_TSO) || 3707 netdev->mtu < 576) 3708 netdev->features &= ~NETIF_F_TSO; 3709 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3710 netdev->mtu < 576) 3711 netdev->features &= ~NETIF_F_TSO6; 3712 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3713 netdev->features &= ~NETIF_F_TSO_ECN; 3714 if (!(netdev->wanted_features & NETIF_F_GRO)) 3715 netdev->features &= ~NETIF_F_GRO; 3716 if (!(netdev->wanted_features & NETIF_F_GSO)) 3717 netdev->features &= ~NETIF_F_GSO; 3718 } 3719 3720 adapter->vsi.id = adapter->vsi_res->vsi_id; 3721 3722 adapter->vsi.back = adapter; 3723 adapter->vsi.base_vector = 1; 3724 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3725 vsi->netdev = adapter->netdev; 3726 vsi->qs_handle = adapter->vsi_res->qset_handle; 3727 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3728 adapter->rss_key_size = vfres->rss_key_size; 3729 adapter->rss_lut_size = vfres->rss_lut_size; 3730 } else { 3731 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3732 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3733 } 3734 3735 return 0; 3736 } 3737 3738 /** 3739 * iavf_shutdown - Shutdown the device in preparation for a reboot 3740 * @pdev: pci device structure 3741 **/ 3742 static void iavf_shutdown(struct pci_dev *pdev) 3743 { 3744 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 3745 struct net_device *netdev = adapter->netdev; 3746 3747 netif_device_detach(netdev); 3748 3749 if (netif_running(netdev)) 3750 iavf_close(netdev); 3751 3752 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 3753 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 3754 /* Prevent the watchdog from running. */ 3755 iavf_change_state(adapter, __IAVF_REMOVE); 3756 adapter->aq_required = 0; 3757 mutex_unlock(&adapter->crit_lock); 3758 3759 #ifdef CONFIG_PM 3760 pci_save_state(pdev); 3761 3762 #endif 3763 pci_disable_device(pdev); 3764 } 3765 3766 /** 3767 * iavf_probe - Device Initialization Routine 3768 * @pdev: PCI device information struct 3769 * @ent: entry in iavf_pci_tbl 3770 * 3771 * Returns 0 on success, negative on failure 3772 * 3773 * iavf_probe initializes an adapter identified by a pci_dev structure. 3774 * The OS initialization, configuring of the adapter private structure, 3775 * and a hardware reset occur. 
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct iavf_adapter *adapter = NULL;
        struct iavf_hw *hw = NULL;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "DMA configuration failed: 0x%x\n", err);
                        goto err_dma;
                }
        }

        err = pci_request_regions(pdev, iavf_driver_name);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        pci_enable_pcie_error_reporting(pdev);

        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
                                   IAVF_MAX_REQ_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;

        hw = &adapter->hw;
        hw->back = adapter;

        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        iavf_change_state(adapter, __IAVF_STARTUP);

        /* Call save state here because it relies on the adapter struct. */
        pci_save_state(pdev);

        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
        hw->bus.bus_id = pdev->bus->number;

        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
         */
        mutex_init(&adapter->crit_lock);
        mutex_init(&adapter->client_lock);
        mutex_init(&adapter->remove_lock);
        mutex_init(&hw->aq.asq_mutex);
        mutex_init(&hw->aq.arq_mutex);

        spin_lock_init(&adapter->mac_vlan_list_lock);
        spin_lock_init(&adapter->cloud_filter_list_lock);
        spin_lock_init(&adapter->fdir_fltr_lock);
        spin_lock_init(&adapter->adv_rss_lock);

        INIT_LIST_HEAD(&adapter->mac_filter_list);
        INIT_LIST_HEAD(&adapter->vlan_filter_list);
        INIT_LIST_HEAD(&adapter->cloud_filter_list);
        INIT_LIST_HEAD(&adapter->fdir_list_head);
        INIT_LIST_HEAD(&adapter->adv_rss_list_head);

        INIT_WORK(&adapter->reset_task, iavf_reset_task);
        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
        /* Stagger the first watchdog run by PCI function number (0-35 ms)
         * so that VFs sharing a device do not all start at the same time.
         */
        queue_delayed_work(iavf_wq, &adapter->watchdog_task,
                           msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

        /* Setup the wait queue for indicating transition to down status */
        init_waitqueue_head(&adapter->down_waitqueue);

        return 0;

err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
        struct net_device *netdev = dev_get_drvdata(dev_d);
        struct iavf_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        while (!mutex_trylock(&adapter->crit_lock))
                usleep_range(500, 1000);

        if (netif_running(netdev)) {
                rtnl_lock();
                iavf_down(adapter);
                rtnl_unlock();
        }
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);

        mutex_unlock(&adapter->crit_lock);

        return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
        struct pci_dev *pdev = to_pci_dev(dev_d);
        struct iavf_adapter *adapter;
        int err;

        adapter = iavf_pdev_to_adapter(pdev);

        pci_set_master(pdev);

        rtnl_lock();
        err = iavf_set_interrupt_capability(adapter);
        if (err) {
                rtnl_unlock();
                dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
                return err;
        }
        err = iavf_request_misc_irq(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
                return err;
        }

        queue_work(iavf_wq, &adapter->reset_task);

        netif_device_attach(adapter->netdev);

        return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
        struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
        enum iavf_state_t prev_state = adapter->last_state;
        struct net_device *netdev = adapter->netdev;
        struct iavf_fdir_fltr *fdir, *fdirtmp;
        struct iavf_vlan_filter *vlf, *vlftmp;
        struct iavf_adv_rss *rss, *rsstmp;
        struct iavf_mac_filter *f, *ftmp;
        struct iavf_cloud_filter *cf, *cftmp;
        struct iavf_hw *hw = &adapter->hw;
        int err;
        /* Indicate we are in remove and not to run reset_task */
        mutex_lock(&adapter->remove_lock);
        cancel_work_sync(&adapter->reset_task);
        cancel_delayed_work_sync(&adapter->watchdog_task);
        cancel_delayed_work_sync(&adapter->client_task);
        if (adapter->netdev_registered) {
                unregister_netdev(netdev);
                adapter->netdev_registered = false;
        }
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_del_device(adapter);
                if (err)
                        dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
                                 err);
        }

        iavf_request_reset(adapter);
        msleep(50);
        /* If the FW isn't responding, kick it once, but only once.
         */
        if (!iavf_asq_done(hw)) {
                iavf_request_reset(adapter);
                msleep(50);
        }
        if (iavf_lock_timeout(&adapter->crit_lock, 5000))
                dev_warn(&adapter->pdev->dev,
                         "failed to acquire crit_lock in %s\n", __func__);

        /* Shut down all the garbage mashers on the detention level */
        iavf_change_state(adapter, __IAVF_REMOVE);
        adapter->aq_required = 0;
        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

        iavf_free_all_tx_resources(adapter);
        iavf_free_all_rx_resources(adapter);
        iavf_misc_irq_disable(adapter);
        iavf_free_misc_irq(adapter);

        /* In case we enter iavf_remove from erroneous state, free traffic irqs
         * here, so as to not cause a kernel crash, when calling
         * iavf_reset_interrupt_capability.
         */
        if ((adapter->last_state == __IAVF_RESETTING &&
             prev_state != __IAVF_DOWN) ||
            (adapter->last_state == __IAVF_RUNNING &&
             !(netdev->flags & IFF_UP)))
                iavf_free_traffic_irqs(adapter);

        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);

        cancel_delayed_work_sync(&adapter->watchdog_task);

        cancel_work_sync(&adapter->adminq_task);

        iavf_free_rss(adapter);

        if (hw->aq.asq.count)
                iavf_shutdown_adminq(hw);

        /* destroy the locks only once, here */
        mutex_destroy(&hw->aq.arq_mutex);
        mutex_destroy(&hw->aq.asq_mutex);
        mutex_destroy(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
        mutex_destroy(&adapter->crit_lock);
        mutex_unlock(&adapter->remove_lock);
        mutex_destroy(&adapter->remove_lock);

        iounmap(hw->hw_addr);
        pci_release_regions(pdev);
        iavf_free_queues(adapter);
        kfree(adapter->vf_res);
        spin_lock_bh(&adapter->mac_vlan_list_lock);
        /* If we got removed before an up/down sequence, we've got a filter
         * hanging out there that we need to get rid of.
         */
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                list_del(&f->list);
                kfree(f);
        }
        list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
                                 list) {
                list_del(&vlf->list);
                kfree(vlf);
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
                list_del(&cf->list);
                kfree(cf);
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        spin_lock_bh(&adapter->fdir_fltr_lock);
        list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
                list_del(&fdir->list);
                kfree(fdir);
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);

        spin_lock_bh(&adapter->adv_rss_lock);
        list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
                                 list) {
                list_del(&rss->list);
                kfree(rss);
        }
        spin_unlock_bh(&adapter->adv_rss_lock);

        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);

        pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
        .name      = iavf_driver_name,
        .id_table  = iavf_pci_tbl,
        .probe     = iavf_probe,
        .remove    = iavf_remove,
        .driver.pm = &iavf_pm_ops,
        .shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
        int ret;

        pr_info("iavf: %s\n", iavf_driver_string);

        pr_info("%s\n", iavf_copyright);

        iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
                                  iavf_driver_name);
        if (!iavf_wq) {
                pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
                return -ENOMEM;
        }
        ret = pci_register_driver(&iavf_driver);
        if (ret)
                /* don't leak the workqueue if registration fails */
                destroy_workqueue(iavf_wq);
        return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
        pci_unregister_driver(&iavf_driver);
        destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */