1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static int iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 static const char iavf_copyright[] = 25 "Copyright (c) 2013 - 2018 Intel Corporation."; 26 27 /* iavf_pci_tbl - PCI Device ID Table 28 * 29 * Wildcard entries (PCI_ANY_ID) should come last 30 * Last entry must be all 0s 31 * 32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 33 * Class, Class Mask, private data (not used) } 34 */ 35 static const struct pci_device_id iavf_pci_tbl[] = { 36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 40 /* required last entry */ 41 {0, } 42 }; 43 44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 45 46 MODULE_ALIAS("i40evf"); 47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 49 MODULE_LICENSE("GPL v2"); 50 51 static const struct net_device_ops iavf_netdev_ops; 52 struct workqueue_struct *iavf_wq; 53 54 /** 55 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 56 * @hw: pointer to the HW structure 57 * @mem: ptr to mem struct to fill out 58 * @size: size of memory requested 59 * @alignment: what to align the allocation to 60 **/ 61 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 62 struct iavf_dma_mem *mem, 63 u64 size, u32 alignment) 64 { 65 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 66 67 if (!mem) 68 return IAVF_ERR_PARAM; 69 70 mem->size = ALIGN(size, alignment); 71 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 72 (dma_addr_t *)&mem->pa, GFP_KERNEL); 73 if (mem->va) 74 return 0; 75 else 76 return IAVF_ERR_NO_MEMORY; 77 } 78 79 /** 80 * iavf_free_dma_mem_d - OS specific memory free for shared code 81 * @hw: pointer to the HW structure 82 * @mem: ptr to mem struct to free 83 **/ 84 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 85 struct iavf_dma_mem *mem) 86 { 87 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 88 89 if (!mem || !mem->va) 90 return IAVF_ERR_PARAM; 91 dma_free_coherent(&adapter->pdev->dev, mem->size, 92 mem->va, (dma_addr_t)mem->pa); 93 return 0; 94 } 95 96 /** 97 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 98 * @hw: pointer to the HW structure 99 * @mem: ptr to mem struct to fill out 100 * @size: size of memory requested 101 **/ 102 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, 103 struct iavf_virt_mem *mem, u32 size) 104 { 105 if (!mem) 106 return IAVF_ERR_PARAM; 107 108 mem->size = size; 109 mem->va = kzalloc(size, GFP_KERNEL); 110 111 if (mem->va) 112 return 0; 113 else 114 return IAVF_ERR_NO_MEMORY; 115 
} 116 117 /** 118 * iavf_free_virt_mem_d - OS specific memory free for shared code 119 * @hw: pointer to the HW structure 120 * @mem: ptr to mem struct to free 121 **/ 122 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 123 struct iavf_virt_mem *mem) 124 { 125 if (!mem) 126 return IAVF_ERR_PARAM; 127 128 /* it's ok to kfree a NULL pointer */ 129 kfree(mem->va); 130 131 return 0; 132 } 133 134 /** 135 * iavf_lock_timeout - try to set bit but give up after timeout 136 * @adapter: board private structure 137 * @bit: bit to set 138 * @msecs: timeout in msecs 139 * 140 * Returns 0 on success, negative on failure 141 **/ 142 static int iavf_lock_timeout(struct iavf_adapter *adapter, 143 enum iavf_critical_section_t bit, 144 unsigned int msecs) 145 { 146 unsigned int wait, delay = 10; 147 148 for (wait = 0; wait < msecs; wait += delay) { 149 if (!test_and_set_bit(bit, &adapter->crit_section)) 150 return 0; 151 152 msleep(delay); 153 } 154 155 return -1; 156 } 157 158 /** 159 * iavf_schedule_reset - Set the flags and schedule a reset event 160 * @adapter: board private structure 161 **/ 162 void iavf_schedule_reset(struct iavf_adapter *adapter) 163 { 164 if (!(adapter->flags & 165 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 166 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 167 queue_work(iavf_wq, &adapter->reset_task); 168 } 169 } 170 171 /** 172 * iavf_tx_timeout - Respond to a Tx Hang 173 * @netdev: network interface device structure 174 * @txqueue: queue number that is timing out 175 **/ 176 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 177 { 178 struct iavf_adapter *adapter = netdev_priv(netdev); 179 180 adapter->tx_timeout_count++; 181 iavf_schedule_reset(adapter); 182 } 183 184 /** 185 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 186 * @adapter: board private structure 187 **/ 188 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 189 { 190 struct iavf_hw *hw = &adapter->hw; 191 192 if (!adapter->msix_entries) 193 return; 194 195 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 196 197 iavf_flush(hw); 198 199 synchronize_irq(adapter->msix_entries[0].vector); 200 } 201 202 /** 203 * iavf_misc_irq_enable - Enable default interrupt generation settings 204 * @adapter: board private structure 205 **/ 206 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 207 { 208 struct iavf_hw *hw = &adapter->hw; 209 210 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 211 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 212 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 213 214 iavf_flush(hw); 215 } 216 217 /** 218 * iavf_irq_disable - Mask off interrupt generation on the NIC 219 * @adapter: board private structure 220 **/ 221 static void iavf_irq_disable(struct iavf_adapter *adapter) 222 { 223 int i; 224 struct iavf_hw *hw = &adapter->hw; 225 226 if (!adapter->msix_entries) 227 return; 228 229 for (i = 1; i < adapter->num_msix_vectors; i++) { 230 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 231 synchronize_irq(adapter->msix_entries[i].vector); 232 } 233 iavf_flush(hw); 234 } 235 236 /** 237 * iavf_irq_enable_queues - Enable interrupt for specified queues 238 * @adapter: board private structure 239 * @mask: bitmap of queues to enable 240 **/ 241 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 242 { 243 struct iavf_hw *hw = &adapter->hw; 244 int i; 245 246 for (i = 1; i < adapter->num_msix_vectors; i++) { 247 if (mask & BIT(i - 1)) { 248 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 249 
IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 250 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 251 } 252 } 253 } 254 255 /** 256 * iavf_irq_enable - Enable default interrupt generation settings 257 * @adapter: board private structure 258 * @flush: boolean value whether to run rd32() 259 **/ 260 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 261 { 262 struct iavf_hw *hw = &adapter->hw; 263 264 iavf_misc_irq_enable(adapter); 265 iavf_irq_enable_queues(adapter, ~0); 266 267 if (flush) 268 iavf_flush(hw); 269 } 270 271 /** 272 * iavf_msix_aq - Interrupt handler for vector 0 273 * @irq: interrupt number 274 * @data: pointer to netdev 275 **/ 276 static irqreturn_t iavf_msix_aq(int irq, void *data) 277 { 278 struct net_device *netdev = data; 279 struct iavf_adapter *adapter = netdev_priv(netdev); 280 struct iavf_hw *hw = &adapter->hw; 281 282 /* handle non-queue interrupts, these reads clear the registers */ 283 rd32(hw, IAVF_VFINT_ICR01); 284 rd32(hw, IAVF_VFINT_ICR0_ENA1); 285 286 /* schedule work on the private workqueue */ 287 queue_work(iavf_wq, &adapter->adminq_task); 288 289 return IRQ_HANDLED; 290 } 291 292 /** 293 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 294 * @irq: interrupt number 295 * @data: pointer to a q_vector 296 **/ 297 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 298 { 299 struct iavf_q_vector *q_vector = data; 300 301 if (!q_vector->tx.ring && !q_vector->rx.ring) 302 return IRQ_HANDLED; 303 304 napi_schedule_irqoff(&q_vector->napi); 305 306 return IRQ_HANDLED; 307 } 308 309 /** 310 * iavf_map_vector_to_rxq - associate irqs with rx queues 311 * @adapter: board private structure 312 * @v_idx: interrupt number 313 * @r_idx: queue number 314 **/ 315 static void 316 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 317 { 318 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 319 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 320 struct iavf_hw *hw = &adapter->hw; 321 322 rx_ring->q_vector = q_vector; 323 rx_ring->next = q_vector->rx.ring; 324 rx_ring->vsi = &adapter->vsi; 325 q_vector->rx.ring = rx_ring; 326 q_vector->rx.count++; 327 q_vector->rx.next_update = jiffies + 1; 328 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 329 q_vector->ring_mask |= BIT(r_idx); 330 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 331 q_vector->rx.current_itr >> 1); 332 q_vector->rx.current_itr = q_vector->rx.target_itr; 333 } 334 335 /** 336 * iavf_map_vector_to_txq - associate irqs with tx queues 337 * @adapter: board private structure 338 * @v_idx: interrupt number 339 * @t_idx: queue number 340 **/ 341 static void 342 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 343 { 344 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 345 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 346 struct iavf_hw *hw = &adapter->hw; 347 348 tx_ring->q_vector = q_vector; 349 tx_ring->next = q_vector->tx.ring; 350 tx_ring->vsi = &adapter->vsi; 351 q_vector->tx.ring = tx_ring; 352 q_vector->tx.count++; 353 q_vector->tx.next_update = jiffies + 1; 354 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 355 q_vector->num_ringpairs++; 356 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 357 q_vector->tx.target_itr >> 1); 358 q_vector->tx.current_itr = q_vector->tx.target_itr; 359 } 360 361 /** 362 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 363 * @adapter: board private structure to initialize 364 * 365 * This function maps descriptor rings to the 
queue-specific vectors 366 * we were allotted through the MSI-X enabling code. Ideally, we'd have 367 * one vector per ring/queue, but on a constrained vector budget, we 368 * group the rings as "efficiently" as possible. You would add new 369 * mapping configurations in here. 370 **/ 371 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 372 { 373 int rings_remaining = adapter->num_active_queues; 374 int ridx = 0, vidx = 0; 375 int q_vectors; 376 377 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 378 379 for (; ridx < rings_remaining; ridx++) { 380 iavf_map_vector_to_rxq(adapter, vidx, ridx); 381 iavf_map_vector_to_txq(adapter, vidx, ridx); 382 383 /* In the case where we have more queues than vectors, continue 384 * round-robin on vectors until all queues are mapped. 385 */ 386 if (++vidx >= q_vectors) 387 vidx = 0; 388 } 389 390 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 391 } 392 393 /** 394 * iavf_irq_affinity_notify - Callback for affinity changes 395 * @notify: context as to what irq was changed 396 * @mask: the new affinity mask 397 * 398 * This is a callback function used by the irq_set_affinity_notifier function 399 * so that we may register to receive changes to the irq affinity masks. 400 **/ 401 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 402 const cpumask_t *mask) 403 { 404 struct iavf_q_vector *q_vector = 405 container_of(notify, struct iavf_q_vector, affinity_notify); 406 407 cpumask_copy(&q_vector->affinity_mask, mask); 408 } 409 410 /** 411 * iavf_irq_affinity_release - Callback for affinity notifier release 412 * @ref: internal core kernel usage 413 * 414 * This is a callback function used by the irq_set_affinity_notifier function 415 * to inform the current notification subscriber that they will no longer 416 * receive notifications. 417 **/ 418 static void iavf_irq_affinity_release(struct kref *ref) {} 419 420 /** 421 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 422 * @adapter: board private structure 423 * @basename: device basename 424 * 425 * Allocates MSI-X vectors for tx and rx handling, and requests 426 * interrupts from the kernel. 
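 * Returns 0 on success; on failure, any vectors already requested are
 * released again and the request_irq() error is returned.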
427 **/ 428 static int 429 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 430 { 431 unsigned int vector, q_vectors; 432 unsigned int rx_int_idx = 0, tx_int_idx = 0; 433 int irq_num, err; 434 int cpu; 435 436 iavf_irq_disable(adapter); 437 /* Decrement for Other and TCP Timer vectors */ 438 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 439 440 for (vector = 0; vector < q_vectors; vector++) { 441 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 442 443 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 444 445 if (q_vector->tx.ring && q_vector->rx.ring) { 446 snprintf(q_vector->name, sizeof(q_vector->name), 447 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 448 tx_int_idx++; 449 } else if (q_vector->rx.ring) { 450 snprintf(q_vector->name, sizeof(q_vector->name), 451 "iavf-%s-rx-%d", basename, rx_int_idx++); 452 } else if (q_vector->tx.ring) { 453 snprintf(q_vector->name, sizeof(q_vector->name), 454 "iavf-%s-tx-%d", basename, tx_int_idx++); 455 } else { 456 /* skip this unused q_vector */ 457 continue; 458 } 459 err = request_irq(irq_num, 460 iavf_msix_clean_rings, 461 0, 462 q_vector->name, 463 q_vector); 464 if (err) { 465 dev_info(&adapter->pdev->dev, 466 "Request_irq failed, error: %d\n", err); 467 goto free_queue_irqs; 468 } 469 /* register for affinity change notifications */ 470 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 471 q_vector->affinity_notify.release = 472 iavf_irq_affinity_release; 473 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 474 /* Spread the IRQ affinity hints across online CPUs. Note that 475 * get_cpu_mask returns a mask with a permanent lifetime so 476 * it's safe to use as a hint for irq_set_affinity_hint. 477 */ 478 cpu = cpumask_local_spread(q_vector->v_idx, -1); 479 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 480 } 481 482 return 0; 483 484 free_queue_irqs: 485 while (vector) { 486 vector--; 487 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 488 irq_set_affinity_notifier(irq_num, NULL); 489 irq_set_affinity_hint(irq_num, NULL); 490 free_irq(irq_num, &adapter->q_vectors[vector]); 491 } 492 return err; 493 } 494 495 /** 496 * iavf_request_misc_irq - Initialize MSI-X interrupts 497 * @adapter: board private structure 498 * 499 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 500 * vector is only for the admin queue, and stays active even when the netdev 501 * is closed. 502 **/ 503 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 504 { 505 struct net_device *netdev = adapter->netdev; 506 int err; 507 508 snprintf(adapter->misc_vector_name, 509 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 510 dev_name(&adapter->pdev->dev)); 511 err = request_irq(adapter->msix_entries[0].vector, 512 &iavf_msix_aq, 0, 513 adapter->misc_vector_name, netdev); 514 if (err) { 515 dev_err(&adapter->pdev->dev, 516 "request_irq for %s failed: %d\n", 517 adapter->misc_vector_name, err); 518 free_irq(adapter->msix_entries[0].vector, netdev); 519 } 520 return err; 521 } 522 523 /** 524 * iavf_free_traffic_irqs - Free MSI-X interrupts 525 * @adapter: board private structure 526 * 527 * Frees all MSI-X vectors other than 0. 
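 * The affinity notifier and affinity hint are cleared for each vector
 * before its IRQ is freed.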
528 **/ 529 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 530 { 531 int vector, irq_num, q_vectors; 532 533 if (!adapter->msix_entries) 534 return; 535 536 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 537 538 for (vector = 0; vector < q_vectors; vector++) { 539 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 540 irq_set_affinity_notifier(irq_num, NULL); 541 irq_set_affinity_hint(irq_num, NULL); 542 free_irq(irq_num, &adapter->q_vectors[vector]); 543 } 544 } 545 546 /** 547 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 548 * @adapter: board private structure 549 * 550 * Frees MSI-X vector 0. 551 **/ 552 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 553 { 554 struct net_device *netdev = adapter->netdev; 555 556 if (!adapter->msix_entries) 557 return; 558 559 free_irq(adapter->msix_entries[0].vector, netdev); 560 } 561 562 /** 563 * iavf_configure_tx - Configure Transmit Unit after Reset 564 * @adapter: board private structure 565 * 566 * Configure the Tx unit of the MAC after a reset. 567 **/ 568 static void iavf_configure_tx(struct iavf_adapter *adapter) 569 { 570 struct iavf_hw *hw = &adapter->hw; 571 int i; 572 573 for (i = 0; i < adapter->num_active_queues; i++) 574 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 575 } 576 577 /** 578 * iavf_configure_rx - Configure Receive Unit after Reset 579 * @adapter: board private structure 580 * 581 * Configure the Rx unit of the MAC after a reset. 582 **/ 583 static void iavf_configure_rx(struct iavf_adapter *adapter) 584 { 585 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 586 struct iavf_hw *hw = &adapter->hw; 587 int i; 588 589 /* Legacy Rx will always default to a 2048 buffer size. */ 590 #if (PAGE_SIZE < 8192) 591 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 592 struct net_device *netdev = adapter->netdev; 593 594 /* For jumbo frames on systems with 4K pages we have to use 595 * an order 1 page, so we might as well increase the size 596 * of our Rx buffer to make better use of the available space 597 */ 598 rx_buf_len = IAVF_RXBUFFER_3072; 599 600 /* We use a 1536 buffer size for configurations with 601 * standard Ethernet mtu. On x86 this gives us enough room 602 * for shared info and 192 bytes of padding. 603 */ 604 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 605 (netdev->mtu <= ETH_DATA_LEN)) 606 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 607 } 608 #endif 609 610 for (i = 0; i < adapter->num_active_queues; i++) { 611 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 612 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 613 614 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 615 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 616 else 617 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 618 } 619 } 620 621 /** 622 * iavf_find_vlan - Search filter list for specific vlan filter 623 * @adapter: board private structure 624 * @vlan: vlan tag 625 * 626 * Returns ptr to the filter object or NULL. Must be called while holding the 627 * mac_vlan_list_lock. 628 **/ 629 static struct 630 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 631 { 632 struct iavf_vlan_filter *f; 633 634 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 635 if (vlan == f->vlan) 636 return f; 637 } 638 return NULL; 639 } 640 641 /** 642 * iavf_add_vlan - Add a vlan filter to the list 643 * @adapter: board private structure 644 * @vlan: VLAN tag 645 * 646 * Returns ptr to the filter object or NULL when no memory available. 
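 * Takes mac_vlan_list_lock internally, so it must not be called with that
 * lock already held.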
647 **/ 648 static struct 649 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 650 { 651 struct iavf_vlan_filter *f = NULL; 652 653 spin_lock_bh(&adapter->mac_vlan_list_lock); 654 655 f = iavf_find_vlan(adapter, vlan); 656 if (!f) { 657 f = kzalloc(sizeof(*f), GFP_ATOMIC); 658 if (!f) 659 goto clearout; 660 661 f->vlan = vlan; 662 663 list_add_tail(&f->list, &adapter->vlan_filter_list); 664 f->add = true; 665 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 666 } 667 668 clearout: 669 spin_unlock_bh(&adapter->mac_vlan_list_lock); 670 return f; 671 } 672 673 /** 674 * iavf_del_vlan - Remove a vlan filter from the list 675 * @adapter: board private structure 676 * @vlan: VLAN tag 677 **/ 678 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 679 { 680 struct iavf_vlan_filter *f; 681 682 spin_lock_bh(&adapter->mac_vlan_list_lock); 683 684 f = iavf_find_vlan(adapter, vlan); 685 if (f) { 686 f->remove = true; 687 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 688 } 689 690 spin_unlock_bh(&adapter->mac_vlan_list_lock); 691 } 692 693 /** 694 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 695 * @netdev: network device struct 696 * @proto: unused protocol data 697 * @vid: VLAN tag 698 **/ 699 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 700 __always_unused __be16 proto, u16 vid) 701 { 702 struct iavf_adapter *adapter = netdev_priv(netdev); 703 704 if (!VLAN_ALLOWED(adapter)) 705 return -EIO; 706 if (iavf_add_vlan(adapter, vid) == NULL) 707 return -ENOMEM; 708 return 0; 709 } 710 711 /** 712 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 713 * @netdev: network device struct 714 * @proto: unused protocol data 715 * @vid: VLAN tag 716 **/ 717 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 718 __always_unused __be16 proto, u16 vid) 719 { 720 struct iavf_adapter *adapter = netdev_priv(netdev); 721 722 if (VLAN_ALLOWED(adapter)) { 723 iavf_del_vlan(adapter, vid); 724 return 0; 725 } 726 return -EIO; 727 } 728 729 /** 730 * iavf_find_filter - Search filter list for specific mac filter 731 * @adapter: board private structure 732 * @macaddr: the MAC address 733 * 734 * Returns ptr to the filter object or NULL. Must be called while holding the 735 * mac_vlan_list_lock. 736 **/ 737 static struct 738 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 739 const u8 *macaddr) 740 { 741 struct iavf_mac_filter *f; 742 743 if (!macaddr) 744 return NULL; 745 746 list_for_each_entry(f, &adapter->mac_filter_list, list) { 747 if (ether_addr_equal(macaddr, f->macaddr)) 748 return f; 749 } 750 return NULL; 751 } 752 753 /** 754 * iavf_add_filter - Add a mac filter to the filter list 755 * @adapter: board private structure 756 * @macaddr: the MAC address 757 * 758 * Returns ptr to the filter object or NULL when no memory available. 
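 * Must be called while holding mac_vlan_list_lock; unlike iavf_add_vlan(),
 * this helper does not take the lock itself.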
759 **/ 760 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 761 const u8 *macaddr) 762 { 763 struct iavf_mac_filter *f; 764 765 if (!macaddr) 766 return NULL; 767 768 f = iavf_find_filter(adapter, macaddr); 769 if (!f) { 770 f = kzalloc(sizeof(*f), GFP_ATOMIC); 771 if (!f) 772 return f; 773 774 ether_addr_copy(f->macaddr, macaddr); 775 776 list_add_tail(&f->list, &adapter->mac_filter_list); 777 f->add = true; 778 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 779 } else { 780 f->remove = false; 781 } 782 783 return f; 784 } 785 786 /** 787 * iavf_set_mac - NDO callback to set port mac address 788 * @netdev: network interface device structure 789 * @p: pointer to an address structure 790 * 791 * Returns 0 on success, negative on failure 792 **/ 793 static int iavf_set_mac(struct net_device *netdev, void *p) 794 { 795 struct iavf_adapter *adapter = netdev_priv(netdev); 796 struct iavf_hw *hw = &adapter->hw; 797 struct iavf_mac_filter *f; 798 struct sockaddr *addr = p; 799 800 if (!is_valid_ether_addr(addr->sa_data)) 801 return -EADDRNOTAVAIL; 802 803 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 804 return 0; 805 806 spin_lock_bh(&adapter->mac_vlan_list_lock); 807 808 f = iavf_find_filter(adapter, hw->mac.addr); 809 if (f) { 810 f->remove = true; 811 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 812 } 813 814 f = iavf_add_filter(adapter, addr->sa_data); 815 816 spin_unlock_bh(&adapter->mac_vlan_list_lock); 817 818 if (f) { 819 ether_addr_copy(hw->mac.addr, addr->sa_data); 820 } 821 822 return (f == NULL) ? -ENOMEM : 0; 823 } 824 825 /** 826 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 827 * @netdev: the netdevice 828 * @addr: address to add 829 * 830 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 831 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 832 */ 833 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 834 { 835 struct iavf_adapter *adapter = netdev_priv(netdev); 836 837 if (iavf_add_filter(adapter, addr)) 838 return 0; 839 else 840 return -ENOMEM; 841 } 842 843 /** 844 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 845 * @netdev: the netdevice 846 * @addr: address to add 847 * 848 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 849 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 850 */ 851 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 852 { 853 struct iavf_adapter *adapter = netdev_priv(netdev); 854 struct iavf_mac_filter *f; 855 856 /* Under some circumstances, we might receive a request to delete 857 * our own device address from our uc list. Because we store the 858 * device address in the VSI's MAC/VLAN filter list, we need to ignore 859 * such requests and not delete our device address from this list. 
860 */ 861 if (ether_addr_equal(addr, netdev->dev_addr)) 862 return 0; 863 864 f = iavf_find_filter(adapter, addr); 865 if (f) { 866 f->remove = true; 867 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 868 } 869 return 0; 870 } 871 872 /** 873 * iavf_set_rx_mode - NDO callback to set the netdev filters 874 * @netdev: network interface device structure 875 **/ 876 static void iavf_set_rx_mode(struct net_device *netdev) 877 { 878 struct iavf_adapter *adapter = netdev_priv(netdev); 879 880 spin_lock_bh(&adapter->mac_vlan_list_lock); 881 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 882 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 883 spin_unlock_bh(&adapter->mac_vlan_list_lock); 884 885 if (netdev->flags & IFF_PROMISC && 886 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 887 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 888 else if (!(netdev->flags & IFF_PROMISC) && 889 adapter->flags & IAVF_FLAG_PROMISC_ON) 890 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 891 892 if (netdev->flags & IFF_ALLMULTI && 893 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 894 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 895 else if (!(netdev->flags & IFF_ALLMULTI) && 896 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 897 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 898 } 899 900 /** 901 * iavf_napi_enable_all - enable NAPI on all queue vectors 902 * @adapter: board private structure 903 **/ 904 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 905 { 906 int q_idx; 907 struct iavf_q_vector *q_vector; 908 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 909 910 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 911 struct napi_struct *napi; 912 913 q_vector = &adapter->q_vectors[q_idx]; 914 napi = &q_vector->napi; 915 napi_enable(napi); 916 } 917 } 918 919 /** 920 * iavf_napi_disable_all - disable NAPI on all queue vectors 921 * @adapter: board private structure 922 **/ 923 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 924 { 925 int q_idx; 926 struct iavf_q_vector *q_vector; 927 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 928 929 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 930 q_vector = &adapter->q_vectors[q_idx]; 931 napi_disable(&q_vector->napi); 932 } 933 } 934 935 /** 936 * iavf_configure - set up transmit and receive data structures 937 * @adapter: board private structure 938 **/ 939 static void iavf_configure(struct iavf_adapter *adapter) 940 { 941 struct net_device *netdev = adapter->netdev; 942 int i; 943 944 iavf_set_rx_mode(netdev); 945 946 iavf_configure_tx(adapter); 947 iavf_configure_rx(adapter); 948 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 949 950 for (i = 0; i < adapter->num_active_queues; i++) { 951 struct iavf_ring *ring = &adapter->rx_rings[i]; 952 953 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 954 } 955 } 956 957 /** 958 * iavf_up_complete - Finish the last steps of bringing up a connection 959 * @adapter: board private structure 960 * 961 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
962 **/ 963 static void iavf_up_complete(struct iavf_adapter *adapter) 964 { 965 adapter->state = __IAVF_RUNNING; 966 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 967 968 iavf_napi_enable_all(adapter); 969 970 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 971 if (CLIENT_ENABLED(adapter)) 972 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 973 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 974 } 975 976 /** 977 * iavf_down - Shutdown the connection processing 978 * @adapter: board private structure 979 * 980 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 981 **/ 982 void iavf_down(struct iavf_adapter *adapter) 983 { 984 struct net_device *netdev = adapter->netdev; 985 struct iavf_vlan_filter *vlf; 986 struct iavf_cloud_filter *cf; 987 struct iavf_fdir_fltr *fdir; 988 struct iavf_mac_filter *f; 989 struct iavf_adv_rss *rss; 990 991 if (adapter->state <= __IAVF_DOWN_PENDING) 992 return; 993 994 netif_carrier_off(netdev); 995 netif_tx_disable(netdev); 996 adapter->link_up = false; 997 iavf_napi_disable_all(adapter); 998 iavf_irq_disable(adapter); 999 1000 spin_lock_bh(&adapter->mac_vlan_list_lock); 1001 1002 /* clear the sync flag on all filters */ 1003 __dev_uc_unsync(adapter->netdev, NULL); 1004 __dev_mc_unsync(adapter->netdev, NULL); 1005 1006 /* remove all MAC filters */ 1007 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1008 f->remove = true; 1009 } 1010 1011 /* remove all VLAN filters */ 1012 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1013 vlf->remove = true; 1014 } 1015 1016 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1017 1018 /* remove all cloud filters */ 1019 spin_lock_bh(&adapter->cloud_filter_list_lock); 1020 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1021 cf->del = true; 1022 } 1023 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1024 1025 /* remove all Flow Director filters */ 1026 spin_lock_bh(&adapter->fdir_fltr_lock); 1027 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1028 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1029 } 1030 spin_unlock_bh(&adapter->fdir_fltr_lock); 1031 1032 /* remove all advance RSS configuration */ 1033 spin_lock_bh(&adapter->adv_rss_lock); 1034 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1035 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1036 spin_unlock_bh(&adapter->adv_rss_lock); 1037 1038 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1039 adapter->state != __IAVF_RESETTING) { 1040 /* cancel any current operation */ 1041 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1042 /* Schedule operations to close down the HW. Don't wait 1043 * here for this to complete. The watchdog is still running 1044 * and it will take care of this. 1045 */ 1046 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1047 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1048 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1049 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1050 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1051 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1052 } 1053 1054 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1055 } 1056 1057 /** 1058 * iavf_acquire_msix_vectors - Setup the MSIX capability 1059 * @adapter: board private structure 1060 * @vectors: number of vectors to request 1061 * 1062 * Work with the OS to set up the MSIX vectors needed. 
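 * On failure the msix_entries array is freed and the error from
 * pci_enable_msix_range() is returned.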
1063 * 1064 * Returns 0 on success, negative on failure 1065 **/ 1066 static int 1067 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1068 { 1069 int err, vector_threshold; 1070 1071 /* We'll want at least 3 (vector_threshold): 1072 * 0) Other (Admin Queue and link, mostly) 1073 * 1) TxQ[0] Cleanup 1074 * 2) RxQ[0] Cleanup 1075 */ 1076 vector_threshold = MIN_MSIX_COUNT; 1077 1078 /* The more we get, the more we will assign to Tx/Rx Cleanup 1079 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1080 * Right now, we simply care about how many we'll get; we'll 1081 * set them up later while requesting irq's. 1082 */ 1083 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1084 vector_threshold, vectors); 1085 if (err < 0) { 1086 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1087 kfree(adapter->msix_entries); 1088 adapter->msix_entries = NULL; 1089 return err; 1090 } 1091 1092 /* Adjust for only the vectors we'll use, which is minimum 1093 * of max_msix_q_vectors + NONQ_VECS, or the number of 1094 * vectors we were allocated. 1095 */ 1096 adapter->num_msix_vectors = err; 1097 return 0; 1098 } 1099 1100 /** 1101 * iavf_free_queues - Free memory for all rings 1102 * @adapter: board private structure to initialize 1103 * 1104 * Free all of the memory associated with queue pairs. 1105 **/ 1106 static void iavf_free_queues(struct iavf_adapter *adapter) 1107 { 1108 if (!adapter->vsi_res) 1109 return; 1110 adapter->num_active_queues = 0; 1111 kfree(adapter->tx_rings); 1112 adapter->tx_rings = NULL; 1113 kfree(adapter->rx_rings); 1114 adapter->rx_rings = NULL; 1115 } 1116 1117 /** 1118 * iavf_alloc_queues - Allocate memory for all rings 1119 * @adapter: board private structure to initialize 1120 * 1121 * We allocate one ring per queue at run-time since we don't know the 1122 * number of queues at compile-time. The polling_netdev array is 1123 * intended for Multiqueue, but should work fine with a single queue. 1124 **/ 1125 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1126 { 1127 int i, num_active_queues; 1128 1129 /* If we're in reset reallocating queues we don't actually know yet for 1130 * certain the PF gave us the number of queues we asked for but we'll 1131 * assume it did. Once basic reset is finished we'll confirm once we 1132 * start negotiating config with PF. 
1133 */ 1134 if (adapter->num_req_queues) 1135 num_active_queues = adapter->num_req_queues; 1136 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1137 adapter->num_tc) 1138 num_active_queues = adapter->ch_config.total_qps; 1139 else 1140 num_active_queues = min_t(int, 1141 adapter->vsi_res->num_queue_pairs, 1142 (int)(num_online_cpus())); 1143 1144 1145 adapter->tx_rings = kcalloc(num_active_queues, 1146 sizeof(struct iavf_ring), GFP_KERNEL); 1147 if (!adapter->tx_rings) 1148 goto err_out; 1149 adapter->rx_rings = kcalloc(num_active_queues, 1150 sizeof(struct iavf_ring), GFP_KERNEL); 1151 if (!adapter->rx_rings) 1152 goto err_out; 1153 1154 for (i = 0; i < num_active_queues; i++) { 1155 struct iavf_ring *tx_ring; 1156 struct iavf_ring *rx_ring; 1157 1158 tx_ring = &adapter->tx_rings[i]; 1159 1160 tx_ring->queue_index = i; 1161 tx_ring->netdev = adapter->netdev; 1162 tx_ring->dev = &adapter->pdev->dev; 1163 tx_ring->count = adapter->tx_desc_count; 1164 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1165 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1166 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1167 1168 rx_ring = &adapter->rx_rings[i]; 1169 rx_ring->queue_index = i; 1170 rx_ring->netdev = adapter->netdev; 1171 rx_ring->dev = &adapter->pdev->dev; 1172 rx_ring->count = adapter->rx_desc_count; 1173 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1174 } 1175 1176 adapter->num_active_queues = num_active_queues; 1177 1178 return 0; 1179 1180 err_out: 1181 iavf_free_queues(adapter); 1182 return -ENOMEM; 1183 } 1184 1185 /** 1186 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1187 * @adapter: board private structure to initialize 1188 * 1189 * Attempt to configure the interrupts using the best available 1190 * capabilities of the hardware and the kernel. 1191 **/ 1192 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1193 { 1194 int vector, v_budget; 1195 int pairs = 0; 1196 int err = 0; 1197 1198 if (!adapter->vsi_res) { 1199 err = -EIO; 1200 goto out; 1201 } 1202 pairs = adapter->num_active_queues; 1203 1204 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1205 * us much good if we have more vectors than CPUs. However, we already 1206 * limit the total number of queues by the number of CPUs so we do not 1207 * need any further limiting here. 
1208 */ 1209 v_budget = min_t(int, pairs + NONQ_VECS, 1210 (int)adapter->vf_res->max_vectors); 1211 1212 adapter->msix_entries = kcalloc(v_budget, 1213 sizeof(struct msix_entry), GFP_KERNEL); 1214 if (!adapter->msix_entries) { 1215 err = -ENOMEM; 1216 goto out; 1217 } 1218 1219 for (vector = 0; vector < v_budget; vector++) 1220 adapter->msix_entries[vector].entry = vector; 1221 1222 err = iavf_acquire_msix_vectors(adapter, v_budget); 1223 1224 out: 1225 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1226 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1227 return err; 1228 } 1229 1230 /** 1231 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1232 * @adapter: board private structure 1233 * 1234 * Return 0 on success, negative on failure 1235 **/ 1236 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1237 { 1238 struct iavf_aqc_get_set_rss_key_data *rss_key = 1239 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1240 struct iavf_hw *hw = &adapter->hw; 1241 int ret = 0; 1242 1243 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1244 /* bail because we already have a command pending */ 1245 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1246 adapter->current_op); 1247 return -EBUSY; 1248 } 1249 1250 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1251 if (ret) { 1252 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1253 iavf_stat_str(hw, ret), 1254 iavf_aq_str(hw, hw->aq.asq_last_status)); 1255 return ret; 1256 1257 } 1258 1259 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1260 adapter->rss_lut, adapter->rss_lut_size); 1261 if (ret) { 1262 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1263 iavf_stat_str(hw, ret), 1264 iavf_aq_str(hw, hw->aq.asq_last_status)); 1265 } 1266 1267 return ret; 1268 1269 } 1270 1271 /** 1272 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1273 * @adapter: board private structure 1274 * 1275 * Returns 0 on success, negative on failure 1276 **/ 1277 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1278 { 1279 struct iavf_hw *hw = &adapter->hw; 1280 u32 *dw; 1281 u16 i; 1282 1283 dw = (u32 *)adapter->rss_key; 1284 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1285 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1286 1287 dw = (u32 *)adapter->rss_lut; 1288 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1289 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1290 1291 iavf_flush(hw); 1292 1293 return 0; 1294 } 1295 1296 /** 1297 * iavf_config_rss - Configure RSS keys and lut 1298 * @adapter: board private structure 1299 * 1300 * Returns 0 on success, negative on failure 1301 **/ 1302 int iavf_config_rss(struct iavf_adapter *adapter) 1303 { 1304 1305 if (RSS_PF(adapter)) { 1306 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1307 IAVF_FLAG_AQ_SET_RSS_KEY; 1308 return 0; 1309 } else if (RSS_AQ(adapter)) { 1310 return iavf_config_rss_aq(adapter); 1311 } else { 1312 return iavf_config_rss_reg(adapter); 1313 } 1314 } 1315 1316 /** 1317 * iavf_fill_rss_lut - Fill the lut with default values 1318 * @adapter: board private structure 1319 **/ 1320 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1321 { 1322 u16 i; 1323 1324 for (i = 0; i < adapter->rss_lut_size; i++) 1325 adapter->rss_lut[i] = i % adapter->num_active_queues; 1326 } 1327 1328 /** 1329 * iavf_init_rss - Prepare for RSS 1330 * @adapter: board private structure 1331 * 1332 * Return 0 on success, negative on failure 1333 **/ 1334 static int 
iavf_init_rss(struct iavf_adapter *adapter) 1335 { 1336 struct iavf_hw *hw = &adapter->hw; 1337 int ret; 1338 1339 if (!RSS_PF(adapter)) { 1340 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1341 if (adapter->vf_res->vf_cap_flags & 1342 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1343 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1344 else 1345 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1346 1347 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1348 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1349 } 1350 1351 iavf_fill_rss_lut(adapter); 1352 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1353 ret = iavf_config_rss(adapter); 1354 1355 return ret; 1356 } 1357 1358 /** 1359 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1360 * @adapter: board private structure to initialize 1361 * 1362 * We allocate one q_vector per queue interrupt. If allocation fails we 1363 * return -ENOMEM. 1364 **/ 1365 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1366 { 1367 int q_idx = 0, num_q_vectors; 1368 struct iavf_q_vector *q_vector; 1369 1370 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1371 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1372 GFP_KERNEL); 1373 if (!adapter->q_vectors) 1374 return -ENOMEM; 1375 1376 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1377 q_vector = &adapter->q_vectors[q_idx]; 1378 q_vector->adapter = adapter; 1379 q_vector->vsi = &adapter->vsi; 1380 q_vector->v_idx = q_idx; 1381 q_vector->reg_idx = q_idx; 1382 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1383 netif_napi_add(adapter->netdev, &q_vector->napi, 1384 iavf_napi_poll, NAPI_POLL_WEIGHT); 1385 } 1386 1387 return 0; 1388 } 1389 1390 /** 1391 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1392 * @adapter: board private structure to initialize 1393 * 1394 * This function frees the memory allocated to the q_vectors. In addition if 1395 * NAPI is enabled it will delete any references to the NAPI struct prior 1396 * to freeing the q_vector. 
1397 **/ 1398 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1399 { 1400 int q_idx, num_q_vectors; 1401 int napi_vectors; 1402 1403 if (!adapter->q_vectors) 1404 return; 1405 1406 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1407 napi_vectors = adapter->num_active_queues; 1408 1409 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1410 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1411 1412 if (q_idx < napi_vectors) 1413 netif_napi_del(&q_vector->napi); 1414 } 1415 kfree(adapter->q_vectors); 1416 adapter->q_vectors = NULL; 1417 } 1418 1419 /** 1420 * iavf_reset_interrupt_capability - Reset MSIX setup 1421 * @adapter: board private structure 1422 * 1423 **/ 1424 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1425 { 1426 if (!adapter->msix_entries) 1427 return; 1428 1429 pci_disable_msix(adapter->pdev); 1430 kfree(adapter->msix_entries); 1431 adapter->msix_entries = NULL; 1432 } 1433 1434 /** 1435 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1436 * @adapter: board private structure to initialize 1437 * 1438 **/ 1439 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1440 { 1441 int err; 1442 1443 err = iavf_alloc_queues(adapter); 1444 if (err) { 1445 dev_err(&adapter->pdev->dev, 1446 "Unable to allocate memory for queues\n"); 1447 goto err_alloc_queues; 1448 } 1449 1450 rtnl_lock(); 1451 err = iavf_set_interrupt_capability(adapter); 1452 rtnl_unlock(); 1453 if (err) { 1454 dev_err(&adapter->pdev->dev, 1455 "Unable to setup interrupt capabilities\n"); 1456 goto err_set_interrupt; 1457 } 1458 1459 err = iavf_alloc_q_vectors(adapter); 1460 if (err) { 1461 dev_err(&adapter->pdev->dev, 1462 "Unable to allocate memory for queue vectors\n"); 1463 goto err_alloc_q_vectors; 1464 } 1465 1466 /* If we've made it so far while ADq flag being ON, then we haven't 1467 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1468 * resources have been allocated in the reset path. 1469 * Now we can truly claim that ADq is enabled. 1470 */ 1471 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1472 adapter->num_tc) 1473 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1474 adapter->num_tc); 1475 1476 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1477 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1478 adapter->num_active_queues); 1479 1480 return 0; 1481 err_alloc_q_vectors: 1482 iavf_reset_interrupt_capability(adapter); 1483 err_set_interrupt: 1484 iavf_free_queues(adapter); 1485 err_alloc_queues: 1486 return err; 1487 } 1488 1489 /** 1490 * iavf_free_rss - Free memory used by RSS structs 1491 * @adapter: board private structure 1492 **/ 1493 static void iavf_free_rss(struct iavf_adapter *adapter) 1494 { 1495 kfree(adapter->rss_key); 1496 adapter->rss_key = NULL; 1497 1498 kfree(adapter->rss_lut); 1499 adapter->rss_lut = NULL; 1500 } 1501 1502 /** 1503 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1504 * @adapter: board private structure 1505 * 1506 * Returns 0 on success, negative on failure 1507 **/ 1508 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1509 { 1510 struct net_device *netdev = adapter->netdev; 1511 int err; 1512 1513 if (netif_running(netdev)) 1514 iavf_free_traffic_irqs(adapter); 1515 iavf_free_misc_irq(adapter); 1516 iavf_reset_interrupt_capability(adapter); 1517 iavf_free_q_vectors(adapter); 1518 iavf_free_queues(adapter); 1519 1520 err = iavf_init_interrupt_scheme(adapter); 1521 if (err) 1522 goto err; 1523 1524 netif_tx_stop_all_queues(netdev); 1525 1526 err = iavf_request_misc_irq(adapter); 1527 if (err) 1528 goto err; 1529 1530 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1531 1532 iavf_map_rings_to_vectors(adapter); 1533 err: 1534 return err; 1535 } 1536 1537 /** 1538 * iavf_process_aq_command - process aq_required flags 1539 * and sends aq command 1540 * @adapter: pointer to iavf adapter structure 1541 * 1542 * Returns 0 on success 1543 * Returns error code if no command was sent 1544 * or error code if the command failed. 1545 **/ 1546 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1547 { 1548 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1549 return iavf_send_vf_config_msg(adapter); 1550 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1551 iavf_disable_queues(adapter); 1552 return 0; 1553 } 1554 1555 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1556 iavf_map_queues(adapter); 1557 return 0; 1558 } 1559 1560 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1561 iavf_add_ether_addrs(adapter); 1562 return 0; 1563 } 1564 1565 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1566 iavf_add_vlans(adapter); 1567 return 0; 1568 } 1569 1570 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1571 iavf_del_ether_addrs(adapter); 1572 return 0; 1573 } 1574 1575 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1576 iavf_del_vlans(adapter); 1577 return 0; 1578 } 1579 1580 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1581 iavf_enable_vlan_stripping(adapter); 1582 return 0; 1583 } 1584 1585 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1586 iavf_disable_vlan_stripping(adapter); 1587 return 0; 1588 } 1589 1590 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1591 iavf_configure_queues(adapter); 1592 return 0; 1593 } 1594 1595 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1596 iavf_enable_queues(adapter); 1597 return 0; 1598 } 1599 1600 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1601 /* This message goes straight to the firmware, not the 1602 * PF, so we don't have to set current_op as we will 1603 * not get a response through the ARQ. 
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	return -EAGAIN;
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure it returns -EAGAIN.
 **/
static int iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	adapter->state = __IAVF_INIT_VERSION_CHECK;
err:
	return err;
}

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_INIT_VERSION_CHECK driver state.
 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
 * on failure it returns -EAGAIN.
 **/
static int iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	adapter->state = __IAVF_INIT_GET_RESOURCES;

err:
	return err;
}

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes the driver initialization procedure.
1793 * When success the state is changed to __IAVF_DOWN 1794 * when fails it returns -EAGAIN 1795 **/ 1796 static int iavf_init_get_resources(struct iavf_adapter *adapter) 1797 { 1798 struct net_device *netdev = adapter->netdev; 1799 struct pci_dev *pdev = adapter->pdev; 1800 struct iavf_hw *hw = &adapter->hw; 1801 int err; 1802 1803 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1804 /* aq msg sent, awaiting reply */ 1805 if (!adapter->vf_res) { 1806 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1807 GFP_KERNEL); 1808 if (!adapter->vf_res) { 1809 err = -ENOMEM; 1810 goto err; 1811 } 1812 } 1813 err = iavf_get_vf_config(adapter); 1814 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1815 err = iavf_send_vf_config_msg(adapter); 1816 goto err; 1817 } else if (err == IAVF_ERR_PARAM) { 1818 /* We only get ERR_PARAM if the device is in a very bad 1819 * state or if we've been disabled for previous bad 1820 * behavior. Either way, we're done now. 1821 */ 1822 iavf_shutdown_adminq(hw); 1823 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1824 return 0; 1825 } 1826 if (err) { 1827 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1828 goto err_alloc; 1829 } 1830 1831 err = iavf_process_config(adapter); 1832 if (err) 1833 goto err_alloc; 1834 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1835 1836 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1837 1838 netdev->netdev_ops = &iavf_netdev_ops; 1839 iavf_set_ethtool_ops(netdev); 1840 netdev->watchdog_timeo = 5 * HZ; 1841 1842 /* MTU range: 68 - 9710 */ 1843 netdev->min_mtu = ETH_MIN_MTU; 1844 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1845 1846 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1847 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1848 adapter->hw.mac.addr); 1849 eth_hw_addr_random(netdev); 1850 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1851 } else { 1852 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1853 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1854 } 1855 1856 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1857 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1858 err = iavf_init_interrupt_scheme(adapter); 1859 if (err) 1860 goto err_sw_init; 1861 iavf_map_rings_to_vectors(adapter); 1862 if (adapter->vf_res->vf_cap_flags & 1863 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1864 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1865 1866 err = iavf_request_misc_irq(adapter); 1867 if (err) 1868 goto err_sw_init; 1869 1870 netif_carrier_off(netdev); 1871 adapter->link_up = false; 1872 1873 /* set the semaphore to prevent any callbacks after device registration 1874 * up to time when state of driver will be set to __IAVF_DOWN 1875 */ 1876 rtnl_lock(); 1877 if (!adapter->netdev_registered) { 1878 err = register_netdevice(netdev); 1879 if (err) { 1880 rtnl_unlock(); 1881 goto err_register; 1882 } 1883 } 1884 1885 adapter->netdev_registered = true; 1886 1887 netif_tx_stop_all_queues(netdev); 1888 if (CLIENT_ALLOWED(adapter)) { 1889 err = iavf_lan_add_device(adapter); 1890 if (err) 1891 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1892 err); 1893 } 1894 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1895 if (netdev->features & NETIF_F_GRO) 1896 dev_info(&pdev->dev, "GRO is enabled\n"); 1897 1898 adapter->state = __IAVF_DOWN; 1899 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1900 rtnl_unlock(); 1901 1902 iavf_misc_irq_enable(adapter); 1903 wake_up(&adapter->down_waitqueue); 1904 1905 
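	/* rss_key_size and rss_lut_size were established earlier by
	 * iavf_process_config(), either from the PF-provided VF resources or
	 * from the driver defaults.
	 */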
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1906 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1907 if (!adapter->rss_key || !adapter->rss_lut) { 1908 err = -ENOMEM; 1909 goto err_mem; 1910 } 1911 if (RSS_AQ(adapter)) 1912 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1913 else 1914 iavf_init_rss(adapter); 1915 1916 return err; 1917 err_mem: 1918 iavf_free_rss(adapter); 1919 err_register: 1920 iavf_free_misc_irq(adapter); 1921 err_sw_init: 1922 iavf_reset_interrupt_capability(adapter); 1923 err_alloc: 1924 kfree(adapter->vf_res); 1925 adapter->vf_res = NULL; 1926 err: 1927 return err; 1928 } 1929 1930 /** 1931 * iavf_watchdog_task - Periodic call-back task 1932 * @work: pointer to work_struct 1933 **/ 1934 static void iavf_watchdog_task(struct work_struct *work) 1935 { 1936 struct iavf_adapter *adapter = container_of(work, 1937 struct iavf_adapter, 1938 watchdog_task.work); 1939 struct iavf_hw *hw = &adapter->hw; 1940 u32 reg_val; 1941 1942 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1943 goto restart_watchdog; 1944 1945 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1946 adapter->state = __IAVF_COMM_FAILED; 1947 1948 switch (adapter->state) { 1949 case __IAVF_COMM_FAILED: 1950 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1951 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1952 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1953 reg_val == VIRTCHNL_VFR_COMPLETED) { 1954 /* A chance for redemption! */ 1955 dev_err(&adapter->pdev->dev, 1956 "Hardware came out of reset. Attempting reinit.\n"); 1957 adapter->state = __IAVF_STARTUP; 1958 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1959 queue_delayed_work(iavf_wq, &adapter->init_task, 10); 1960 clear_bit(__IAVF_IN_CRITICAL_TASK, 1961 &adapter->crit_section); 1962 /* Don't reschedule the watchdog, since we've restarted 1963 * the init task. When init_task contacts the PF and 1964 * gets everything set up again, it'll restart the 1965 * watchdog for us. Down, boy. Sit. Stay. Woof. 
1966 */ 1967 return; 1968 } 1969 adapter->aq_required = 0; 1970 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1971 clear_bit(__IAVF_IN_CRITICAL_TASK, 1972 &adapter->crit_section); 1973 queue_delayed_work(iavf_wq, 1974 &adapter->watchdog_task, 1975 msecs_to_jiffies(10)); 1976 goto watchdog_done; 1977 case __IAVF_RESETTING: 1978 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1979 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1980 return; 1981 case __IAVF_DOWN: 1982 case __IAVF_DOWN_PENDING: 1983 case __IAVF_TESTING: 1984 case __IAVF_RUNNING: 1985 if (adapter->current_op) { 1986 if (!iavf_asq_done(hw)) { 1987 dev_dbg(&adapter->pdev->dev, 1988 "Admin queue timeout\n"); 1989 iavf_send_api_ver(adapter); 1990 } 1991 } else { 1992 /* An error will be returned if no commands were 1993 * processed; use this opportunity to update stats 1994 */ 1995 if (iavf_process_aq_command(adapter) && 1996 adapter->state == __IAVF_RUNNING) 1997 iavf_request_stats(adapter); 1998 } 1999 break; 2000 case __IAVF_REMOVE: 2001 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2002 return; 2003 default: 2004 goto restart_watchdog; 2005 } 2006 2007 /* check for hw reset */ 2008 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2009 if (!reg_val) { 2010 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2011 adapter->aq_required = 0; 2012 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2013 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2014 queue_work(iavf_wq, &adapter->reset_task); 2015 goto watchdog_done; 2016 } 2017 2018 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2019 watchdog_done: 2020 if (adapter->state == __IAVF_RUNNING || 2021 adapter->state == __IAVF_COMM_FAILED) 2022 iavf_detect_recover_hung(&adapter->vsi); 2023 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2024 restart_watchdog: 2025 if (adapter->aq_required) 2026 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2027 msecs_to_jiffies(20)); 2028 else 2029 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2030 queue_work(iavf_wq, &adapter->adminq_task); 2031 } 2032 2033 static void iavf_disable_vf(struct iavf_adapter *adapter) 2034 { 2035 struct iavf_mac_filter *f, *ftmp; 2036 struct iavf_vlan_filter *fv, *fvtmp; 2037 struct iavf_cloud_filter *cf, *cftmp; 2038 2039 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2040 2041 /* We don't use netif_running() because it may be true prior to 2042 * ndo_open() returning, so we can't assume it means all our open 2043 * tasks have finished, since we're not holding the rtnl_lock here. 
2044 */ 2045 if (adapter->state == __IAVF_RUNNING) { 2046 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2047 netif_carrier_off(adapter->netdev); 2048 netif_tx_disable(adapter->netdev); 2049 adapter->link_up = false; 2050 iavf_napi_disable_all(adapter); 2051 iavf_irq_disable(adapter); 2052 iavf_free_traffic_irqs(adapter); 2053 iavf_free_all_tx_resources(adapter); 2054 iavf_free_all_rx_resources(adapter); 2055 } 2056 2057 spin_lock_bh(&adapter->mac_vlan_list_lock); 2058 2059 /* Delete all of the filters */ 2060 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2061 list_del(&f->list); 2062 kfree(f); 2063 } 2064 2065 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2066 list_del(&fv->list); 2067 kfree(fv); 2068 } 2069 2070 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2071 2072 spin_lock_bh(&adapter->cloud_filter_list_lock); 2073 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2074 list_del(&cf->list); 2075 kfree(cf); 2076 adapter->num_cloud_filters--; 2077 } 2078 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2079 2080 iavf_free_misc_irq(adapter); 2081 iavf_reset_interrupt_capability(adapter); 2082 iavf_free_queues(adapter); 2083 iavf_free_q_vectors(adapter); 2084 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2085 iavf_shutdown_adminq(&adapter->hw); 2086 adapter->netdev->flags &= ~IFF_UP; 2087 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2088 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2089 adapter->state = __IAVF_DOWN; 2090 wake_up(&adapter->down_waitqueue); 2091 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2092 } 2093 2094 /** 2095 * iavf_reset_task - Call-back task to handle hardware reset 2096 * @work: pointer to work_struct 2097 * 2098 * During reset we need to shut down and reinitialize the admin queue 2099 * before we can use it to communicate with the PF again. We also clear 2100 * and reinit the rings because that context is lost as well. 2101 **/ 2102 static void iavf_reset_task(struct work_struct *work) 2103 { 2104 struct iavf_adapter *adapter = container_of(work, 2105 struct iavf_adapter, 2106 reset_task); 2107 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2108 struct net_device *netdev = adapter->netdev; 2109 struct iavf_hw *hw = &adapter->hw; 2110 struct iavf_mac_filter *f, *ftmp; 2111 struct iavf_vlan_filter *vlf; 2112 struct iavf_cloud_filter *cf; 2113 u32 reg_val; 2114 int i = 0, err; 2115 bool running; 2116 2117 /* When device is being removed it doesn't make sense to run the reset 2118 * task, just return in such a case. 2119 */ 2120 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2121 return; 2122 2123 if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) { 2124 schedule_work(&adapter->reset_task); 2125 return; 2126 } 2127 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2128 &adapter->crit_section)) 2129 usleep_range(500, 1000); 2130 if (CLIENT_ENABLED(adapter)) { 2131 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2132 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2133 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2134 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2135 cancel_delayed_work_sync(&adapter->client_task); 2136 iavf_notify_client_close(&adapter->vsi, true); 2137 } 2138 iavf_misc_irq_disable(adapter); 2139 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2140 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2141 /* Restart the AQ here. If we have been reset but didn't 2142 * detect it, or if the PF had to reinit, our AQ will be hosed. 
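                 * Shutting the AQ down and re-initializing it below gives us
                 * a clean slate, and iavf_request_reset() then asks the PF
                 * (via VIRTCHNL_OP_RESET_VF) to actually reset this VF; the
                 * poll loop further down watches the ARQ enable bit drop as
                 * the sign that the reset really happened.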
2143 */ 2144 iavf_shutdown_adminq(hw); 2145 iavf_init_adminq(hw); 2146 iavf_request_reset(adapter); 2147 } 2148 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2149 2150 /* poll until we see the reset actually happen */ 2151 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2152 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2153 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2154 if (!reg_val) 2155 break; 2156 usleep_range(5000, 10000); 2157 } 2158 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2159 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2160 goto continue_reset; /* act like the reset happened */ 2161 } 2162 2163 /* wait until the reset is complete and the PF is responding to us */ 2164 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2165 /* sleep first to make sure a minimum wait time is met */ 2166 msleep(IAVF_RESET_WAIT_MS); 2167 2168 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2169 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2170 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2171 break; 2172 } 2173 2174 pci_set_master(adapter->pdev); 2175 2176 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2177 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2178 reg_val); 2179 iavf_disable_vf(adapter); 2180 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2181 return; /* Do not attempt to reinit. It's dead, Jim. */ 2182 } 2183 2184 continue_reset: 2185 /* We don't use netif_running() because it may be true prior to 2186 * ndo_open() returning, so we can't assume it means all our open 2187 * tasks have finished, since we're not holding the rtnl_lock here. 2188 */ 2189 running = ((adapter->state == __IAVF_RUNNING) || 2190 (adapter->state == __IAVF_RESETTING)); 2191 2192 if (running) { 2193 netif_carrier_off(netdev); 2194 netif_tx_stop_all_queues(netdev); 2195 adapter->link_up = false; 2196 iavf_napi_disable_all(adapter); 2197 } 2198 iavf_irq_disable(adapter); 2199 2200 adapter->state = __IAVF_RESETTING; 2201 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2202 2203 /* free the Tx/Rx rings and descriptors, might be better to just 2204 * re-use them sometime in the future 2205 */ 2206 iavf_free_all_rx_resources(adapter); 2207 iavf_free_all_tx_resources(adapter); 2208 2209 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2210 /* kill and reinit the admin queue */ 2211 iavf_shutdown_adminq(hw); 2212 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2213 err = iavf_init_adminq(hw); 2214 if (err) 2215 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2216 err); 2217 adapter->aq_required = 0; 2218 2219 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2220 err = iavf_reinit_interrupt_scheme(adapter); 2221 if (err) 2222 goto reset_err; 2223 } 2224 2225 if (RSS_AQ(adapter)) { 2226 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2227 } else { 2228 err = iavf_init_rss(adapter); 2229 if (err) 2230 goto reset_err; 2231 } 2232 2233 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2234 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2235 2236 spin_lock_bh(&adapter->mac_vlan_list_lock); 2237 2238 /* Delete filter for the current MAC address, it could have 2239 * been changed by the PF via administratively set MAC. 2240 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
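         * The loops below only mark the surviving filters with ->add = true;
         * the corresponding virtchnl messages go out later, once the
         * IAVF_FLAG_AQ_ADD_* bits set further down are serviced by the
         * watchdog's admin-queue processing.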
2241 */ 2242 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2243 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2244 list_del(&f->list); 2245 kfree(f); 2246 } 2247 } 2248 /* re-add all MAC filters */ 2249 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2250 f->add = true; 2251 } 2252 /* re-add all VLAN filters */ 2253 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2254 vlf->add = true; 2255 } 2256 2257 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2258 2259 /* check if TCs are running and re-add all cloud filters */ 2260 spin_lock_bh(&adapter->cloud_filter_list_lock); 2261 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2262 adapter->num_tc) { 2263 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2264 cf->add = true; 2265 } 2266 } 2267 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2268 2269 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2270 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2271 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2272 iavf_misc_irq_enable(adapter); 2273 2274 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2275 2276 /* We were running when the reset started, so we need to restore some 2277 * state here. 2278 */ 2279 if (running) { 2280 /* allocate transmit descriptors */ 2281 err = iavf_setup_all_tx_resources(adapter); 2282 if (err) 2283 goto reset_err; 2284 2285 /* allocate receive descriptors */ 2286 err = iavf_setup_all_rx_resources(adapter); 2287 if (err) 2288 goto reset_err; 2289 2290 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2291 err = iavf_request_traffic_irqs(adapter, netdev->name); 2292 if (err) 2293 goto reset_err; 2294 2295 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2296 } 2297 2298 iavf_configure(adapter); 2299 2300 iavf_up_complete(adapter); 2301 2302 iavf_irq_enable(adapter, true); 2303 } else { 2304 adapter->state = __IAVF_DOWN; 2305 wake_up(&adapter->down_waitqueue); 2306 } 2307 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2308 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2309 2310 return; 2311 reset_err: 2312 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2313 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2314 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2315 iavf_close(netdev); 2316 } 2317 2318 /** 2319 * iavf_adminq_task - worker thread to clean the admin queue 2320 * @work: pointer to work_struct containing our data 2321 **/ 2322 static void iavf_adminq_task(struct work_struct *work) 2323 { 2324 struct iavf_adapter *adapter = 2325 container_of(work, struct iavf_adapter, adminq_task); 2326 struct iavf_hw *hw = &adapter->hw; 2327 struct iavf_arq_event_info event; 2328 enum virtchnl_ops v_op; 2329 enum iavf_status ret, v_ret; 2330 u32 val, oldval; 2331 u16 pending; 2332 2333 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2334 goto out; 2335 2336 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2337 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2338 if (!event.msg_buf) 2339 goto out; 2340 2341 if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) 2342 goto freedom; 2343 do { 2344 ret = iavf_clean_arq_element(hw, &event, &pending); 2345 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2346 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2347 2348 if (ret || !v_op) 2349 break; /* No event to process or error cleaning ARQ */ 2350 2351 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2352 event.msg_len); 
2353 if (pending != 0) 2354 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2355 } while (pending); 2356 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2357 2358 if ((adapter->flags & 2359 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2360 adapter->state == __IAVF_RESETTING) 2361 goto freedom; 2362 2363 /* check for error indications */ 2364 val = rd32(hw, hw->aq.arq.len); 2365 if (val == 0xdeadbeef) /* indicates device in reset */ 2366 goto freedom; 2367 oldval = val; 2368 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2369 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2370 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2371 } 2372 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2373 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2374 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2375 } 2376 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2377 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2378 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2379 } 2380 if (oldval != val) 2381 wr32(hw, hw->aq.arq.len, val); 2382 2383 val = rd32(hw, hw->aq.asq.len); 2384 oldval = val; 2385 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2386 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2387 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2388 } 2389 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2390 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2391 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2392 } 2393 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2394 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2395 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2396 } 2397 if (oldval != val) 2398 wr32(hw, hw->aq.asq.len, val); 2399 2400 freedom: 2401 kfree(event.msg_buf); 2402 out: 2403 /* re-enable Admin queue interrupt cause */ 2404 iavf_misc_irq_enable(adapter); 2405 } 2406 2407 /** 2408 * iavf_client_task - worker thread to perform client work 2409 * @work: pointer to work_struct containing our data 2410 * 2411 * This task handles client interactions. Because client calls can be 2412 * reentrant, we can't handle them in the watchdog. 2413 **/ 2414 static void iavf_client_task(struct work_struct *work) 2415 { 2416 struct iavf_adapter *adapter = 2417 container_of(work, struct iavf_adapter, client_task.work); 2418 2419 /* If we can't get the client bit, just give up. We'll be rescheduled 2420 * later. 
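         * test_and_set_bit() acts as a non-blocking trylock here: if another
         * task already owns __IAVF_IN_CLIENT_TASK we simply bail out and rely
         * on the watchdog (see iavf_watchdog_task()) to queue client_task
         * again.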
 */

        if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
                return;

        if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
                iavf_client_subtask(adapter);
                adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
                goto out;
        }
        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
                iavf_notify_client_l2_params(&adapter->vsi);
                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
                goto out;
        }
        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
                iavf_notify_client_close(&adapter->vsi, false);
                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
                goto out;
        }
        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
                iavf_notify_client_open(&adapter->vsi);
                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
        }
out:
        clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}

/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
        int i;

        if (!adapter->tx_rings)
                return;

        for (i = 0; i < adapter->num_active_queues; i++)
                if (adapter->tx_rings[i].desc)
                        iavf_free_tx_resources(&adapter->tx_rings[i]);
}

/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->tx_rings[i].count = adapter->tx_desc_count;
                err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
                if (!err)
                        continue;
                dev_err(&adapter->pdev->dev,
                        "Allocation for Tx Queue %u failed\n", i);
                break;
        }

        return err;
}

/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
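 *
 * For example, iavf_open() frees all rings that may have been set up when
 * the Rx allocation fails; a sketch of that caller-side cleanup:
 *
 *	err = iavf_setup_all_rx_resources(adapter);
 *	if (err) {
 *		iavf_free_all_rx_resources(adapter);
 *		iavf_free_all_tx_resources(adapter);
 *	}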
2501 * 2502 * Return 0 on success, negative on failure 2503 **/ 2504 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2505 { 2506 int i, err = 0; 2507 2508 for (i = 0; i < adapter->num_active_queues; i++) { 2509 adapter->rx_rings[i].count = adapter->rx_desc_count; 2510 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2511 if (!err) 2512 continue; 2513 dev_err(&adapter->pdev->dev, 2514 "Allocation for Rx Queue %u failed\n", i); 2515 break; 2516 } 2517 return err; 2518 } 2519 2520 /** 2521 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2522 * @adapter: board private structure 2523 * 2524 * Free all receive software resources 2525 **/ 2526 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2527 { 2528 int i; 2529 2530 if (!adapter->rx_rings) 2531 return; 2532 2533 for (i = 0; i < adapter->num_active_queues; i++) 2534 if (adapter->rx_rings[i].desc) 2535 iavf_free_rx_resources(&adapter->rx_rings[i]); 2536 } 2537 2538 /** 2539 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2540 * @adapter: board private structure 2541 * @max_tx_rate: max Tx bw for a tc 2542 **/ 2543 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2544 u64 max_tx_rate) 2545 { 2546 int speed = 0, ret = 0; 2547 2548 if (ADV_LINK_SUPPORT(adapter)) { 2549 if (adapter->link_speed_mbps < U32_MAX) { 2550 speed = adapter->link_speed_mbps; 2551 goto validate_bw; 2552 } else { 2553 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2554 return -EINVAL; 2555 } 2556 } 2557 2558 switch (adapter->link_speed) { 2559 case VIRTCHNL_LINK_SPEED_40GB: 2560 speed = SPEED_40000; 2561 break; 2562 case VIRTCHNL_LINK_SPEED_25GB: 2563 speed = SPEED_25000; 2564 break; 2565 case VIRTCHNL_LINK_SPEED_20GB: 2566 speed = SPEED_20000; 2567 break; 2568 case VIRTCHNL_LINK_SPEED_10GB: 2569 speed = SPEED_10000; 2570 break; 2571 case VIRTCHNL_LINK_SPEED_5GB: 2572 speed = SPEED_5000; 2573 break; 2574 case VIRTCHNL_LINK_SPEED_2_5GB: 2575 speed = SPEED_2500; 2576 break; 2577 case VIRTCHNL_LINK_SPEED_1GB: 2578 speed = SPEED_1000; 2579 break; 2580 case VIRTCHNL_LINK_SPEED_100MB: 2581 speed = SPEED_100; 2582 break; 2583 default: 2584 break; 2585 } 2586 2587 validate_bw: 2588 if (max_tx_rate > speed) { 2589 dev_err(&adapter->pdev->dev, 2590 "Invalid tx rate specified\n"); 2591 ret = -EINVAL; 2592 } 2593 2594 return ret; 2595 } 2596 2597 /** 2598 * iavf_validate_ch_config - validate queue mapping info 2599 * @adapter: board private structure 2600 * @mqprio_qopt: queue parameters 2601 * 2602 * This function validates if the config provided by the user to 2603 * configure queue channels is valid or not. Returns 0 on a valid 2604 * config. 
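 *
 * The channel configuration typically originates from a tc mqprio request
 * such as (illustrative only; exact iproute2 syntax may differ by version):
 *
 *	tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * which is checked here against IAVF_MAX_TRAFFIC_CLASS, IAVF_MAX_REQ_QUEUES
 * and, via iavf_validate_tx_bandwidth(), the current link speed.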
2605 **/ 2606 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2607 struct tc_mqprio_qopt_offload *mqprio_qopt) 2608 { 2609 u64 total_max_rate = 0; 2610 int i, num_qps = 0; 2611 u64 tx_rate = 0; 2612 int ret = 0; 2613 2614 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2615 mqprio_qopt->qopt.num_tc < 1) 2616 return -EINVAL; 2617 2618 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2619 if (!mqprio_qopt->qopt.count[i] || 2620 mqprio_qopt->qopt.offset[i] != num_qps) 2621 return -EINVAL; 2622 if (mqprio_qopt->min_rate[i]) { 2623 dev_err(&adapter->pdev->dev, 2624 "Invalid min tx rate (greater than 0) specified\n"); 2625 return -EINVAL; 2626 } 2627 /*convert to Mbps */ 2628 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2629 IAVF_MBPS_DIVISOR); 2630 total_max_rate += tx_rate; 2631 num_qps += mqprio_qopt->qopt.count[i]; 2632 } 2633 if (num_qps > IAVF_MAX_REQ_QUEUES) 2634 return -EINVAL; 2635 2636 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2637 return ret; 2638 } 2639 2640 /** 2641 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2642 * @adapter: board private structure 2643 **/ 2644 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2645 { 2646 struct iavf_cloud_filter *cf, *cftmp; 2647 2648 spin_lock_bh(&adapter->cloud_filter_list_lock); 2649 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2650 list) { 2651 list_del(&cf->list); 2652 kfree(cf); 2653 adapter->num_cloud_filters--; 2654 } 2655 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2656 } 2657 2658 /** 2659 * __iavf_setup_tc - configure multiple traffic classes 2660 * @netdev: network interface device structure 2661 * @type_data: tc offload data 2662 * 2663 * This function processes the config information provided by the 2664 * user to configure traffic classes/queue channels and packages the 2665 * information to request the PF to setup traffic classes. 2666 * 2667 * Returns 0 on success. 
2668 **/ 2669 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2670 { 2671 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2672 struct iavf_adapter *adapter = netdev_priv(netdev); 2673 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2674 u8 num_tc = 0, total_qps = 0; 2675 int ret = 0, netdev_tc = 0; 2676 u64 max_tx_rate; 2677 u16 mode; 2678 int i; 2679 2680 num_tc = mqprio_qopt->qopt.num_tc; 2681 mode = mqprio_qopt->mode; 2682 2683 /* delete queue_channel */ 2684 if (!mqprio_qopt->qopt.hw) { 2685 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2686 /* reset the tc configuration */ 2687 netdev_reset_tc(netdev); 2688 adapter->num_tc = 0; 2689 netif_tx_stop_all_queues(netdev); 2690 netif_tx_disable(netdev); 2691 iavf_del_all_cloud_filters(adapter); 2692 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2693 goto exit; 2694 } else { 2695 return -EINVAL; 2696 } 2697 } 2698 2699 /* add queue channel */ 2700 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2701 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2702 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2703 return -EOPNOTSUPP; 2704 } 2705 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2706 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2707 return -EINVAL; 2708 } 2709 2710 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2711 if (ret) 2712 return ret; 2713 /* Return if same TC config is requested */ 2714 if (adapter->num_tc == num_tc) 2715 return 0; 2716 adapter->num_tc = num_tc; 2717 2718 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2719 if (i < num_tc) { 2720 adapter->ch_config.ch_info[i].count = 2721 mqprio_qopt->qopt.count[i]; 2722 adapter->ch_config.ch_info[i].offset = 2723 mqprio_qopt->qopt.offset[i]; 2724 total_qps += mqprio_qopt->qopt.count[i]; 2725 max_tx_rate = mqprio_qopt->max_rate[i]; 2726 /* convert to Mbps */ 2727 max_tx_rate = div_u64(max_tx_rate, 2728 IAVF_MBPS_DIVISOR); 2729 adapter->ch_config.ch_info[i].max_tx_rate = 2730 max_tx_rate; 2731 } else { 2732 adapter->ch_config.ch_info[i].count = 1; 2733 adapter->ch_config.ch_info[i].offset = 0; 2734 } 2735 } 2736 adapter->ch_config.total_qps = total_qps; 2737 netif_tx_stop_all_queues(netdev); 2738 netif_tx_disable(netdev); 2739 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2740 netdev_reset_tc(netdev); 2741 /* Report the tc mapping up the stack */ 2742 netdev_set_num_tc(adapter->netdev, num_tc); 2743 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2744 u16 qcount = mqprio_qopt->qopt.count[i]; 2745 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2746 2747 if (i < num_tc) 2748 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2749 qoffset); 2750 } 2751 } 2752 exit: 2753 return ret; 2754 } 2755 2756 /** 2757 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2758 * @adapter: board private structure 2759 * @f: pointer to struct flow_cls_offload 2760 * @filter: pointer to cloud filter structure 2761 */ 2762 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2763 struct flow_cls_offload *f, 2764 struct iavf_cloud_filter *filter) 2765 { 2766 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2767 struct flow_dissector *dissector = rule->match.dissector; 2768 u16 n_proto_mask = 0; 2769 u16 n_proto_key = 0; 2770 u8 field_flags = 0; 2771 u16 addr_type = 0; 2772 u16 n_proto = 0; 2773 int i = 0; 2774 struct virtchnl_filter *vf = &filter->f; 2775 2776 if (dissector->used_keys & 2777 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2778 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2779 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2780 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2781 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2782 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2783 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2784 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2785 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2786 dissector->used_keys); 2787 return -EOPNOTSUPP; 2788 } 2789 2790 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2791 struct flow_match_enc_keyid match; 2792 2793 flow_rule_match_enc_keyid(rule, &match); 2794 if (match.mask->keyid != 0) 2795 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2796 } 2797 2798 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2799 struct flow_match_basic match; 2800 2801 flow_rule_match_basic(rule, &match); 2802 n_proto_key = ntohs(match.key->n_proto); 2803 n_proto_mask = ntohs(match.mask->n_proto); 2804 2805 if (n_proto_key == ETH_P_ALL) { 2806 n_proto_key = 0; 2807 n_proto_mask = 0; 2808 } 2809 n_proto = n_proto_key & n_proto_mask; 2810 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2811 return -EINVAL; 2812 if (n_proto == ETH_P_IPV6) { 2813 /* specify flow type as TCP IPv6 */ 2814 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2815 } 2816 2817 if (match.key->ip_proto != IPPROTO_TCP) { 2818 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2819 return -EINVAL; 2820 } 2821 } 2822 2823 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2824 struct flow_match_eth_addrs match; 2825 2826 flow_rule_match_eth_addrs(rule, &match); 2827 2828 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2829 if (!is_zero_ether_addr(match.mask->dst)) { 2830 if (is_broadcast_ether_addr(match.mask->dst)) { 2831 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2832 } else { 2833 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2834 match.mask->dst); 2835 return IAVF_ERR_CONFIG; 2836 } 2837 } 2838 2839 if (!is_zero_ether_addr(match.mask->src)) { 2840 if (is_broadcast_ether_addr(match.mask->src)) { 2841 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2842 } else { 2843 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2844 match.mask->src); 2845 return IAVF_ERR_CONFIG; 2846 } 2847 } 2848 2849 if (!is_zero_ether_addr(match.key->dst)) 2850 if (is_valid_ether_addr(match.key->dst) || 2851 is_multicast_ether_addr(match.key->dst)) { 2852 /* set the mask if a valid dst_mac address */ 2853 for (i = 0; i < ETH_ALEN; i++) 2854 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2855 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2856 match.key->dst); 2857 } 2858 2859 if (!is_zero_ether_addr(match.key->src)) 2860 if (is_valid_ether_addr(match.key->src) || 2861 is_multicast_ether_addr(match.key->src)) { 2862 /* set the mask if a valid dst_mac address */ 2863 for (i = 0; i < ETH_ALEN; i++) 2864 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2865 ether_addr_copy(vf->data.tcp_spec.src_mac, 2866 match.key->src); 2867 } 2868 } 2869 2870 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2871 struct flow_match_vlan match; 2872 2873 flow_rule_match_vlan(rule, &match); 2874 if (match.mask->vlan_id) { 2875 if (match.mask->vlan_id == VLAN_VID_MASK) { 2876 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2877 } else { 2878 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2879 match.mask->vlan_id); 2880 return IAVF_ERR_CONFIG; 2881 } 2882 } 2883 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2884 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2885 } 2886 2887 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2888 struct flow_match_control match; 2889 2890 
                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                if (match.mask->dst) {
                        if (match.mask->dst == cpu_to_be32(0xffffffff)) {
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
                                        be32_to_cpu(match.mask->dst));
                                return IAVF_ERR_CONFIG;
                        }
                }

                if (match.mask->src) {
                        if (match.mask->src == cpu_to_be32(0xffffffff)) {
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
                                        be32_to_cpu(match.mask->src));
                                return IAVF_ERR_CONFIG;
                        }
                }

                if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
                        dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
                        return IAVF_ERR_CONFIG;
                }
                if (match.key->dst) {
                        vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
                        vf->data.tcp_spec.dst_ip[0] = match.key->dst;
                }
                if (match.key->src) {
                        vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
                        vf->data.tcp_spec.src_ip[0] = match.key->src;
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);

                /* validate mask, make sure it is not IPV6_ADDR_ANY */
                if (ipv6_addr_any(&match.mask->dst)) {
                        dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
                                IPV6_ADDR_ANY);
                        return IAVF_ERR_CONFIG;
                }

                /* src and dest IPv6 address should not be LOOPBACK
                 * (0:0:0:0:0:0:0:1) which can be represented as ::1
                 */
                if (ipv6_addr_loopback(&match.key->dst) ||
                    ipv6_addr_loopback(&match.key->src)) {
                        dev_err(&adapter->pdev->dev,
                                "ipv6 addr should not be loopback\n");
                        return IAVF_ERR_CONFIG;
                }
                if (!ipv6_addr_any(&match.mask->dst) ||
                    !ipv6_addr_any(&match.mask->src))
                        field_flags |= IAVF_CLOUD_FIELD_IIP;

                for (i = 0; i < 4; i++)
                        vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
                memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
                       sizeof(vf->data.tcp_spec.dst_ip));
                for (i = 0; i < 4; i++)
                        vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
                memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
                       sizeof(vf->data.tcp_spec.src_ip));
        }
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                if (match.mask->src) {
                        if (match.mask->src == cpu_to_be16(0xffff)) {
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
                                        be16_to_cpu(match.mask->src));
                                return IAVF_ERR_CONFIG;
                        }
                }

                if (match.mask->dst) {
                        if (match.mask->dst == cpu_to_be16(0xffff)) {
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
                                        be16_to_cpu(match.mask->dst));
                                return IAVF_ERR_CONFIG;
                        }
                }
                if (match.key->dst) {
                        vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
                        vf->data.tcp_spec.dst_port = match.key->dst;
                }

                if (match.key->src) {
                        vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
                        vf->data.tcp_spec.src_port = match.key->src;
                }
        }
        vf->field_flags = field_flags;
return 0; 3002 } 3003 3004 /** 3005 * iavf_handle_tclass - Forward to a traffic class on the device 3006 * @adapter: board private structure 3007 * @tc: traffic class index on the device 3008 * @filter: pointer to cloud filter structure 3009 */ 3010 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3011 struct iavf_cloud_filter *filter) 3012 { 3013 if (tc == 0) 3014 return 0; 3015 if (tc < adapter->num_tc) { 3016 if (!filter->f.data.tcp_spec.dst_port) { 3017 dev_err(&adapter->pdev->dev, 3018 "Specify destination port to redirect to traffic class other than TC0\n"); 3019 return -EINVAL; 3020 } 3021 } 3022 /* redirect to a traffic class on the same device */ 3023 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3024 filter->f.action_meta = tc; 3025 return 0; 3026 } 3027 3028 /** 3029 * iavf_configure_clsflower - Add tc flower filters 3030 * @adapter: board private structure 3031 * @cls_flower: Pointer to struct flow_cls_offload 3032 */ 3033 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3034 struct flow_cls_offload *cls_flower) 3035 { 3036 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3037 struct iavf_cloud_filter *filter = NULL; 3038 int err = -EINVAL, count = 50; 3039 3040 if (tc < 0) { 3041 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3042 return -EINVAL; 3043 } 3044 3045 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3046 if (!filter) 3047 return -ENOMEM; 3048 3049 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3050 &adapter->crit_section)) { 3051 if (--count == 0) 3052 goto err; 3053 udelay(1); 3054 } 3055 3056 filter->cookie = cls_flower->cookie; 3057 3058 /* set the mask to all zeroes to begin with */ 3059 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3060 /* start out with flow type and eth type IPv4 to begin with */ 3061 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3062 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3063 if (err < 0) 3064 goto err; 3065 3066 err = iavf_handle_tclass(adapter, tc, filter); 3067 if (err < 0) 3068 goto err; 3069 3070 /* add filter to the list */ 3071 spin_lock_bh(&adapter->cloud_filter_list_lock); 3072 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3073 adapter->num_cloud_filters++; 3074 filter->add = true; 3075 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3076 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3077 err: 3078 if (err) 3079 kfree(filter); 3080 3081 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3082 return err; 3083 } 3084 3085 /* iavf_find_cf - Find the cloud filter in the list 3086 * @adapter: Board private structure 3087 * @cookie: filter specific cookie 3088 * 3089 * Returns ptr to the filter object or NULL. Must be called while holding the 3090 * cloud_filter_list_lock. 
3091 */ 3092 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3093 unsigned long *cookie) 3094 { 3095 struct iavf_cloud_filter *filter = NULL; 3096 3097 if (!cookie) 3098 return NULL; 3099 3100 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3101 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3102 return filter; 3103 } 3104 return NULL; 3105 } 3106 3107 /** 3108 * iavf_delete_clsflower - Remove tc flower filters 3109 * @adapter: board private structure 3110 * @cls_flower: Pointer to struct flow_cls_offload 3111 */ 3112 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3113 struct flow_cls_offload *cls_flower) 3114 { 3115 struct iavf_cloud_filter *filter = NULL; 3116 int err = 0; 3117 3118 spin_lock_bh(&adapter->cloud_filter_list_lock); 3119 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3120 if (filter) { 3121 filter->del = true; 3122 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3123 } else { 3124 err = -EINVAL; 3125 } 3126 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3127 3128 return err; 3129 } 3130 3131 /** 3132 * iavf_setup_tc_cls_flower - flower classifier offloads 3133 * @adapter: board private structure 3134 * @cls_flower: pointer to flow_cls_offload struct with flow info 3135 */ 3136 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3137 struct flow_cls_offload *cls_flower) 3138 { 3139 switch (cls_flower->command) { 3140 case FLOW_CLS_REPLACE: 3141 return iavf_configure_clsflower(adapter, cls_flower); 3142 case FLOW_CLS_DESTROY: 3143 return iavf_delete_clsflower(adapter, cls_flower); 3144 case FLOW_CLS_STATS: 3145 return -EOPNOTSUPP; 3146 default: 3147 return -EOPNOTSUPP; 3148 } 3149 } 3150 3151 /** 3152 * iavf_setup_tc_block_cb - block callback for tc 3153 * @type: type of offload 3154 * @type_data: offload data 3155 * @cb_priv: 3156 * 3157 * This function is the block callback for traffic classes 3158 **/ 3159 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3160 void *cb_priv) 3161 { 3162 struct iavf_adapter *adapter = cb_priv; 3163 3164 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3165 return -EOPNOTSUPP; 3166 3167 switch (type) { 3168 case TC_SETUP_CLSFLOWER: 3169 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3170 default: 3171 return -EOPNOTSUPP; 3172 } 3173 } 3174 3175 static LIST_HEAD(iavf_block_cb_list); 3176 3177 /** 3178 * iavf_setup_tc - configure multiple traffic classes 3179 * @netdev: network interface device structure 3180 * @type: type of offload 3181 * @type_data: tc offload data 3182 * 3183 * This function is the callback to ndo_setup_tc in the 3184 * netdev_ops. 3185 * 3186 * Returns 0 on success 3187 **/ 3188 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3189 void *type_data) 3190 { 3191 struct iavf_adapter *adapter = netdev_priv(netdev); 3192 3193 switch (type) { 3194 case TC_SETUP_QDISC_MQPRIO: 3195 return __iavf_setup_tc(netdev, type_data); 3196 case TC_SETUP_BLOCK: 3197 return flow_block_cb_setup_simple(type_data, 3198 &iavf_block_cb_list, 3199 iavf_setup_tc_block_cb, 3200 adapter, adapter, true); 3201 default: 3202 return -EOPNOTSUPP; 3203 } 3204 } 3205 3206 /** 3207 * iavf_open - Called when a network interface is made active 3208 * @netdev: network interface device structure 3209 * 3210 * Returns 0 on success, negative value on failure 3211 * 3212 * The open entry point is called when a network interface is made 3213 * active by the system (IFF_UP). 
At this point all resources needed 3214 * for transmit and receive operations are allocated, the interrupt 3215 * handler is registered with the OS, the watchdog is started, 3216 * and the stack is notified that the interface is ready. 3217 **/ 3218 static int iavf_open(struct net_device *netdev) 3219 { 3220 struct iavf_adapter *adapter = netdev_priv(netdev); 3221 int err; 3222 3223 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3224 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3225 return -EIO; 3226 } 3227 3228 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3229 &adapter->crit_section)) 3230 usleep_range(500, 1000); 3231 3232 if (adapter->state != __IAVF_DOWN) { 3233 err = -EBUSY; 3234 goto err_unlock; 3235 } 3236 3237 /* allocate transmit descriptors */ 3238 err = iavf_setup_all_tx_resources(adapter); 3239 if (err) 3240 goto err_setup_tx; 3241 3242 /* allocate receive descriptors */ 3243 err = iavf_setup_all_rx_resources(adapter); 3244 if (err) 3245 goto err_setup_rx; 3246 3247 /* clear any pending interrupts, may auto mask */ 3248 err = iavf_request_traffic_irqs(adapter, netdev->name); 3249 if (err) 3250 goto err_req_irq; 3251 3252 spin_lock_bh(&adapter->mac_vlan_list_lock); 3253 3254 iavf_add_filter(adapter, adapter->hw.mac.addr); 3255 3256 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3257 3258 iavf_configure(adapter); 3259 3260 iavf_up_complete(adapter); 3261 3262 iavf_irq_enable(adapter, true); 3263 3264 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3265 3266 return 0; 3267 3268 err_req_irq: 3269 iavf_down(adapter); 3270 iavf_free_traffic_irqs(adapter); 3271 err_setup_rx: 3272 iavf_free_all_rx_resources(adapter); 3273 err_setup_tx: 3274 iavf_free_all_tx_resources(adapter); 3275 err_unlock: 3276 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3277 3278 return err; 3279 } 3280 3281 /** 3282 * iavf_close - Disables a network interface 3283 * @netdev: network interface device structure 3284 * 3285 * Returns 0, this is not allowed to fail 3286 * 3287 * The close entry point is called when an interface is de-activated 3288 * by the OS. The hardware is still under the drivers control, but 3289 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3290 * are freed, along with all transmit and receive resources. 3291 **/ 3292 static int iavf_close(struct net_device *netdev) 3293 { 3294 struct iavf_adapter *adapter = netdev_priv(netdev); 3295 int status; 3296 3297 if (adapter->state <= __IAVF_DOWN_PENDING) 3298 return 0; 3299 3300 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3301 &adapter->crit_section)) 3302 usleep_range(500, 1000); 3303 3304 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3305 if (CLIENT_ENABLED(adapter)) 3306 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3307 3308 iavf_down(adapter); 3309 adapter->state = __IAVF_DOWN_PENDING; 3310 iavf_free_traffic_irqs(adapter); 3311 3312 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3313 3314 /* We explicitly don't free resources here because the hardware is 3315 * still active and can DMA into memory. Resources are cleared in 3316 * iavf_virtchnl_completion() after we get confirmation from the PF 3317 * driver that the rings have been stopped. 3318 * 3319 * Also, we wait for state to transition to __IAVF_DOWN before 3320 * returning. State change occurs in iavf_virtchnl_completion() after 3321 * VF resources are released (which occurs after PF driver processes and 3322 * responds to admin queue commands). 
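	 *
	 * The wake-up side of this handshake is simply (sketch; see
	 * iavf_disable_vf() and the reset task in this file):
	 *
	 *	adapter->state = __IAVF_DOWN;
	 *	wake_up(&adapter->down_waitqueue);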
3323 */ 3324 3325 status = wait_event_timeout(adapter->down_waitqueue, 3326 adapter->state == __IAVF_DOWN, 3327 msecs_to_jiffies(500)); 3328 if (!status) 3329 netdev_warn(netdev, "Device resources not yet released\n"); 3330 return 0; 3331 } 3332 3333 /** 3334 * iavf_change_mtu - Change the Maximum Transfer Unit 3335 * @netdev: network interface device structure 3336 * @new_mtu: new value for maximum frame size 3337 * 3338 * Returns 0 on success, negative on failure 3339 **/ 3340 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3341 { 3342 struct iavf_adapter *adapter = netdev_priv(netdev); 3343 3344 netdev->mtu = new_mtu; 3345 if (CLIENT_ENABLED(adapter)) { 3346 iavf_notify_client_l2_params(&adapter->vsi); 3347 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3348 } 3349 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3350 queue_work(iavf_wq, &adapter->reset_task); 3351 3352 return 0; 3353 } 3354 3355 /** 3356 * iavf_set_features - set the netdev feature flags 3357 * @netdev: ptr to the netdev being adjusted 3358 * @features: the feature set that the stack is suggesting 3359 * Note: expects to be called while under rtnl_lock() 3360 **/ 3361 static int iavf_set_features(struct net_device *netdev, 3362 netdev_features_t features) 3363 { 3364 struct iavf_adapter *adapter = netdev_priv(netdev); 3365 3366 /* Don't allow changing VLAN_RX flag when adapter is not capable 3367 * of VLAN offload 3368 */ 3369 if (!VLAN_ALLOWED(adapter)) { 3370 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3371 return -EINVAL; 3372 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3373 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3374 adapter->aq_required |= 3375 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3376 else 3377 adapter->aq_required |= 3378 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3379 } 3380 3381 return 0; 3382 } 3383 3384 /** 3385 * iavf_features_check - Validate encapsulated packet conforms to limits 3386 * @skb: skb buff 3387 * @dev: This physical port's netdev 3388 * @features: Offload features that the stack believes apply 3389 **/ 3390 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3391 struct net_device *dev, 3392 netdev_features_t features) 3393 { 3394 size_t len; 3395 3396 /* No point in doing any of this if neither checksum nor GSO are 3397 * being requested for this frame. We can rule out both by just 3398 * checking for CHECKSUM_PARTIAL 3399 */ 3400 if (skb->ip_summed != CHECKSUM_PARTIAL) 3401 return features; 3402 3403 /* We cannot support GSO if the MSS is going to be less than 3404 * 64 bytes. If it is then we need to drop support for GSO. 
3405 */ 3406 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3407 features &= ~NETIF_F_GSO_MASK; 3408 3409 /* MACLEN can support at most 63 words */ 3410 len = skb_network_header(skb) - skb->data; 3411 if (len & ~(63 * 2)) 3412 goto out_err; 3413 3414 /* IPLEN and EIPLEN can support at most 127 dwords */ 3415 len = skb_transport_header(skb) - skb_network_header(skb); 3416 if (len & ~(127 * 4)) 3417 goto out_err; 3418 3419 if (skb->encapsulation) { 3420 /* L4TUNLEN can support 127 words */ 3421 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3422 if (len & ~(127 * 2)) 3423 goto out_err; 3424 3425 /* IPLEN can support at most 127 dwords */ 3426 len = skb_inner_transport_header(skb) - 3427 skb_inner_network_header(skb); 3428 if (len & ~(127 * 4)) 3429 goto out_err; 3430 } 3431 3432 /* No need to validate L4LEN as TCP is the only protocol with a 3433 * a flexible value and we support all possible values supported 3434 * by TCP, which is at most 15 dwords 3435 */ 3436 3437 return features; 3438 out_err: 3439 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3440 } 3441 3442 /** 3443 * iavf_fix_features - fix up the netdev feature bits 3444 * @netdev: our net device 3445 * @features: desired feature bits 3446 * 3447 * Returns fixed-up features bits 3448 **/ 3449 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3450 netdev_features_t features) 3451 { 3452 struct iavf_adapter *adapter = netdev_priv(netdev); 3453 3454 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3455 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3456 NETIF_F_HW_VLAN_CTAG_RX | 3457 NETIF_F_HW_VLAN_CTAG_FILTER); 3458 3459 return features; 3460 } 3461 3462 static const struct net_device_ops iavf_netdev_ops = { 3463 .ndo_open = iavf_open, 3464 .ndo_stop = iavf_close, 3465 .ndo_start_xmit = iavf_xmit_frame, 3466 .ndo_set_rx_mode = iavf_set_rx_mode, 3467 .ndo_validate_addr = eth_validate_addr, 3468 .ndo_set_mac_address = iavf_set_mac, 3469 .ndo_change_mtu = iavf_change_mtu, 3470 .ndo_tx_timeout = iavf_tx_timeout, 3471 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3472 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3473 .ndo_features_check = iavf_features_check, 3474 .ndo_fix_features = iavf_fix_features, 3475 .ndo_set_features = iavf_set_features, 3476 .ndo_setup_tc = iavf_setup_tc, 3477 }; 3478 3479 /** 3480 * iavf_check_reset_complete - check that VF reset is complete 3481 * @hw: pointer to hw struct 3482 * 3483 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3484 **/ 3485 static int iavf_check_reset_complete(struct iavf_hw *hw) 3486 { 3487 u32 rstat; 3488 int i; 3489 3490 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3491 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3492 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3493 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3494 (rstat == VIRTCHNL_VFR_COMPLETED)) 3495 return 0; 3496 usleep_range(10, 20); 3497 } 3498 return -EBUSY; 3499 } 3500 3501 /** 3502 * iavf_process_config - Process the config information we got from the PF 3503 * @adapter: board private structure 3504 * 3505 * Verify that we have a valid config struct, and set up our netdev features 3506 * and our VSI struct. 
3507 **/ 3508 int iavf_process_config(struct iavf_adapter *adapter) 3509 { 3510 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3511 int i, num_req_queues = adapter->num_req_queues; 3512 struct net_device *netdev = adapter->netdev; 3513 struct iavf_vsi *vsi = &adapter->vsi; 3514 netdev_features_t hw_enc_features; 3515 netdev_features_t hw_features; 3516 3517 /* got VF config message back from PF, now we can parse it */ 3518 for (i = 0; i < vfres->num_vsis; i++) { 3519 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3520 adapter->vsi_res = &vfres->vsi_res[i]; 3521 } 3522 if (!adapter->vsi_res) { 3523 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3524 return -ENODEV; 3525 } 3526 3527 if (num_req_queues && 3528 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3529 /* Problem. The PF gave us fewer queues than what we had 3530 * negotiated in our request. Need a reset to see if we can't 3531 * get back to a working state. 3532 */ 3533 dev_err(&adapter->pdev->dev, 3534 "Requested %d queues, but PF only gave us %d.\n", 3535 num_req_queues, 3536 adapter->vsi_res->num_queue_pairs); 3537 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3538 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3539 iavf_schedule_reset(adapter); 3540 return -ENODEV; 3541 } 3542 adapter->num_req_queues = 0; 3543 3544 hw_enc_features = NETIF_F_SG | 3545 NETIF_F_IP_CSUM | 3546 NETIF_F_IPV6_CSUM | 3547 NETIF_F_HIGHDMA | 3548 NETIF_F_SOFT_FEATURES | 3549 NETIF_F_TSO | 3550 NETIF_F_TSO_ECN | 3551 NETIF_F_TSO6 | 3552 NETIF_F_SCTP_CRC | 3553 NETIF_F_RXHASH | 3554 NETIF_F_RXCSUM | 3555 0; 3556 3557 /* advertise to stack only if offloads for encapsulated packets is 3558 * supported 3559 */ 3560 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3561 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3562 NETIF_F_GSO_GRE | 3563 NETIF_F_GSO_GRE_CSUM | 3564 NETIF_F_GSO_IPXIP4 | 3565 NETIF_F_GSO_IPXIP6 | 3566 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3567 NETIF_F_GSO_PARTIAL | 3568 0; 3569 3570 if (!(vfres->vf_cap_flags & 3571 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3572 netdev->gso_partial_features |= 3573 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3574 3575 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3576 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3577 netdev->hw_enc_features |= hw_enc_features; 3578 } 3579 /* record features VLANs can make use of */ 3580 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3581 3582 /* Write features and hw_features separately to avoid polluting 3583 * with, or dropping, features that are set when we registered. 3584 */ 3585 hw_features = hw_enc_features; 3586 3587 /* Enable VLAN features if supported */ 3588 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3589 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3590 NETIF_F_HW_VLAN_CTAG_RX); 3591 /* Enable cloud filter if ADQ is supported */ 3592 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3593 hw_features |= NETIF_F_HW_TC; 3594 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3595 hw_features |= NETIF_F_GSO_UDP_L4; 3596 3597 netdev->hw_features |= hw_features; 3598 3599 netdev->features |= hw_features; 3600 3601 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3602 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3603 3604 netdev->priv_flags |= IFF_UNICAST_FLT; 3605 3606 /* Do not turn on offloads when they are requested to be turned off. 3607 * TSO needs minimum 576 bytes to work correctly. 
3608 */ 3609 if (netdev->wanted_features) { 3610 if (!(netdev->wanted_features & NETIF_F_TSO) || 3611 netdev->mtu < 576) 3612 netdev->features &= ~NETIF_F_TSO; 3613 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3614 netdev->mtu < 576) 3615 netdev->features &= ~NETIF_F_TSO6; 3616 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3617 netdev->features &= ~NETIF_F_TSO_ECN; 3618 if (!(netdev->wanted_features & NETIF_F_GRO)) 3619 netdev->features &= ~NETIF_F_GRO; 3620 if (!(netdev->wanted_features & NETIF_F_GSO)) 3621 netdev->features &= ~NETIF_F_GSO; 3622 } 3623 3624 adapter->vsi.id = adapter->vsi_res->vsi_id; 3625 3626 adapter->vsi.back = adapter; 3627 adapter->vsi.base_vector = 1; 3628 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3629 vsi->netdev = adapter->netdev; 3630 vsi->qs_handle = adapter->vsi_res->qset_handle; 3631 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3632 adapter->rss_key_size = vfres->rss_key_size; 3633 adapter->rss_lut_size = vfres->rss_lut_size; 3634 } else { 3635 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3636 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3637 } 3638 3639 return 0; 3640 } 3641 3642 /** 3643 * iavf_init_task - worker thread to perform delayed initialization 3644 * @work: pointer to work_struct containing our data 3645 * 3646 * This task completes the work that was begun in probe. Due to the nature 3647 * of VF-PF communications, we may need to wait tens of milliseconds to get 3648 * responses back from the PF. Rather than busy-wait in probe and bog down the 3649 * whole system, we'll do it in a task so we can sleep. 3650 * This task only runs during driver init. Once we've established 3651 * communications with the PF driver and set up our netdev, the watchdog 3652 * takes over. 3653 **/ 3654 static void iavf_init_task(struct work_struct *work) 3655 { 3656 struct iavf_adapter *adapter = container_of(work, 3657 struct iavf_adapter, 3658 init_task.work); 3659 struct iavf_hw *hw = &adapter->hw; 3660 3661 if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) { 3662 dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__); 3663 return; 3664 } 3665 switch (adapter->state) { 3666 case __IAVF_STARTUP: 3667 if (iavf_startup(adapter) < 0) 3668 goto init_failed; 3669 break; 3670 case __IAVF_INIT_VERSION_CHECK: 3671 if (iavf_init_version_check(adapter) < 0) 3672 goto init_failed; 3673 break; 3674 case __IAVF_INIT_GET_RESOURCES: 3675 if (iavf_init_get_resources(adapter) < 0) 3676 goto init_failed; 3677 goto out; 3678 default: 3679 goto init_failed; 3680 } 3681 3682 queue_delayed_work(iavf_wq, &adapter->init_task, 3683 msecs_to_jiffies(30)); 3684 goto out; 3685 init_failed: 3686 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3687 dev_err(&adapter->pdev->dev, 3688 "Failed to communicate with PF; waiting before retry\n"); 3689 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3690 iavf_shutdown_adminq(hw); 3691 adapter->state = __IAVF_STARTUP; 3692 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3693 goto out; 3694 } 3695 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3696 out: 3697 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3698 } 3699 3700 /** 3701 * iavf_shutdown - Shutdown the device in preparation for a reboot 3702 * @pdev: pci device structure 3703 **/ 3704 static void iavf_shutdown(struct pci_dev *pdev) 3705 { 3706 struct net_device *netdev = pci_get_drvdata(pdev); 3707 struct iavf_adapter *adapter = netdev_priv(netdev); 3708 3709 netif_device_detach(netdev); 3710 3711 
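        /* Detach first so the stack stops queueing new transmits before the
         * interface is torn down; iavf_close() below only runs if the device
         * was actually up.
         */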
	if (netif_running(netdev))
		iavf_close(netdev);

	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
		dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
	/* Prevent the watchdog from running. */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}

/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __IAVF_STARTUP;

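	/* From this point the delayed init_task (queued below) walks the
	 * adapter through the startup state machine; probe itself never
	 * blocks waiting for responses from the PF.
	 */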
	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
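	/* Release the misc (admin queue) vector and MSI-X resources here;
	 * iavf_resume() re-enables interrupt capability and re-requests
	 * the misc IRQ.
	 */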
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

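	/* Ask the PF to reset this VF; the short sleep gives the PF time
	 * to act on the request before we check the admin send queue.
	 */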
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
		dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */