1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static void iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 static const char iavf_copyright[] = 25 "Copyright (c) 2013 - 2018 Intel Corporation."; 26 27 /* iavf_pci_tbl - PCI Device ID Table 28 * 29 * Wildcard entries (PCI_ANY_ID) should come last 30 * Last entry must be all 0s 31 * 32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 33 * Class, Class Mask, private data (not used) } 34 */ 35 static const struct pci_device_id iavf_pci_tbl[] = { 36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 40 /* required last entry */ 41 {0, } 42 }; 43 44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 45 46 MODULE_ALIAS("i40evf"); 47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 49 MODULE_LICENSE("GPL v2"); 50 51 static const struct net_device_ops iavf_netdev_ops; 52 struct workqueue_struct *iavf_wq; 53 54 /** 55 * iavf_pdev_to_adapter - go from pci_dev to adapter 56 * @pdev: pci_dev pointer 57 */ 58 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev) 59 { 60 return netdev_priv(pci_get_drvdata(pdev)); 61 } 62 63 /** 64 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 65 * @hw: pointer to the HW structure 66 * @mem: ptr to mem struct to fill out 67 * @size: size of memory requested 68 * @alignment: what to align the allocation to 69 **/ 70 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 71 struct iavf_dma_mem *mem, 72 u64 size, u32 alignment) 73 { 74 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 75 76 if (!mem) 77 return IAVF_ERR_PARAM; 78 79 mem->size = ALIGN(size, alignment); 80 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 81 (dma_addr_t *)&mem->pa, GFP_KERNEL); 82 if (mem->va) 83 return 0; 84 else 85 return IAVF_ERR_NO_MEMORY; 86 } 87 88 /** 89 * iavf_free_dma_mem_d - OS specific memory free for shared code 90 * @hw: pointer to the HW structure 91 * @mem: ptr to mem struct to free 92 **/ 93 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 94 struct iavf_dma_mem *mem) 95 { 96 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 97 98 if (!mem || !mem->va) 99 return IAVF_ERR_PARAM; 100 dma_free_coherent(&adapter->pdev->dev, mem->size, 101 mem->va, (dma_addr_t)mem->pa); 102 return 0; 103 } 104 105 /** 106 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 107 * @hw: pointer to the HW structure 108 * @mem: ptr to mem struct to fill out 109 * @size: size of memory requested 110 **/ 111 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw 
*hw, 112 struct iavf_virt_mem *mem, u32 size) 113 { 114 if (!mem) 115 return IAVF_ERR_PARAM; 116 117 mem->size = size; 118 mem->va = kzalloc(size, GFP_KERNEL); 119 120 if (mem->va) 121 return 0; 122 else 123 return IAVF_ERR_NO_MEMORY; 124 } 125 126 /** 127 * iavf_free_virt_mem_d - OS specific memory free for shared code 128 * @hw: pointer to the HW structure 129 * @mem: ptr to mem struct to free 130 **/ 131 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 132 struct iavf_virt_mem *mem) 133 { 134 if (!mem) 135 return IAVF_ERR_PARAM; 136 137 /* it's ok to kfree a NULL pointer */ 138 kfree(mem->va); 139 140 return 0; 141 } 142 143 /** 144 * iavf_lock_timeout - try to lock mutex but give up after timeout 145 * @lock: mutex that should be locked 146 * @msecs: timeout in msecs 147 * 148 * Returns 0 on success, negative on failure 149 **/ 150 static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) 151 { 152 unsigned int wait, delay = 10; 153 154 for (wait = 0; wait < msecs; wait += delay) { 155 if (mutex_trylock(lock)) 156 return 0; 157 158 msleep(delay); 159 } 160 161 return -1; 162 } 163 164 /** 165 * iavf_schedule_reset - Set the flags and schedule a reset event 166 * @adapter: board private structure 167 **/ 168 void iavf_schedule_reset(struct iavf_adapter *adapter) 169 { 170 if (!(adapter->flags & 171 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 172 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 173 queue_work(iavf_wq, &adapter->reset_task); 174 } 175 } 176 177 /** 178 * iavf_tx_timeout - Respond to a Tx Hang 179 * @netdev: network interface device structure 180 * @txqueue: queue number that is timing out 181 **/ 182 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 183 { 184 struct iavf_adapter *adapter = netdev_priv(netdev); 185 186 adapter->tx_timeout_count++; 187 iavf_schedule_reset(adapter); 188 } 189 190 /** 191 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 192 * @adapter: board private structure 193 **/ 194 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 195 { 196 struct iavf_hw *hw = &adapter->hw; 197 198 if (!adapter->msix_entries) 199 return; 200 201 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 202 203 iavf_flush(hw); 204 205 synchronize_irq(adapter->msix_entries[0].vector); 206 } 207 208 /** 209 * iavf_misc_irq_enable - Enable default interrupt generation settings 210 * @adapter: board private structure 211 **/ 212 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 213 { 214 struct iavf_hw *hw = &adapter->hw; 215 216 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 217 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 218 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 219 220 iavf_flush(hw); 221 } 222 223 /** 224 * iavf_irq_disable - Mask off interrupt generation on the NIC 225 * @adapter: board private structure 226 **/ 227 static void iavf_irq_disable(struct iavf_adapter *adapter) 228 { 229 int i; 230 struct iavf_hw *hw = &adapter->hw; 231 232 if (!adapter->msix_entries) 233 return; 234 235 for (i = 1; i < adapter->num_msix_vectors; i++) { 236 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 237 synchronize_irq(adapter->msix_entries[i].vector); 238 } 239 iavf_flush(hw); 240 } 241 242 /** 243 * iavf_irq_enable_queues - Enable interrupt for specified queues 244 * @adapter: board private structure 245 * @mask: bitmap of queues to enable 246 **/ 247 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 248 { 249 struct iavf_hw *hw = &adapter->hw; 250 int i; 251 
252 for (i = 1; i < adapter->num_msix_vectors; i++) { 253 if (mask & BIT(i - 1)) { 254 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 255 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 256 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 257 } 258 } 259 } 260 261 /** 262 * iavf_irq_enable - Enable default interrupt generation settings 263 * @adapter: board private structure 264 * @flush: boolean value whether to run rd32() 265 **/ 266 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 267 { 268 struct iavf_hw *hw = &adapter->hw; 269 270 iavf_misc_irq_enable(adapter); 271 iavf_irq_enable_queues(adapter, ~0); 272 273 if (flush) 274 iavf_flush(hw); 275 } 276 277 /** 278 * iavf_msix_aq - Interrupt handler for vector 0 279 * @irq: interrupt number 280 * @data: pointer to netdev 281 **/ 282 static irqreturn_t iavf_msix_aq(int irq, void *data) 283 { 284 struct net_device *netdev = data; 285 struct iavf_adapter *adapter = netdev_priv(netdev); 286 struct iavf_hw *hw = &adapter->hw; 287 288 /* handle non-queue interrupts, these reads clear the registers */ 289 rd32(hw, IAVF_VFINT_ICR01); 290 rd32(hw, IAVF_VFINT_ICR0_ENA1); 291 292 /* schedule work on the private workqueue */ 293 queue_work(iavf_wq, &adapter->adminq_task); 294 295 return IRQ_HANDLED; 296 } 297 298 /** 299 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 300 * @irq: interrupt number 301 * @data: pointer to a q_vector 302 **/ 303 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 304 { 305 struct iavf_q_vector *q_vector = data; 306 307 if (!q_vector->tx.ring && !q_vector->rx.ring) 308 return IRQ_HANDLED; 309 310 napi_schedule_irqoff(&q_vector->napi); 311 312 return IRQ_HANDLED; 313 } 314 315 /** 316 * iavf_map_vector_to_rxq - associate irqs with rx queues 317 * @adapter: board private structure 318 * @v_idx: interrupt number 319 * @r_idx: queue number 320 **/ 321 static void 322 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 323 { 324 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 325 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 326 struct iavf_hw *hw = &adapter->hw; 327 328 rx_ring->q_vector = q_vector; 329 rx_ring->next = q_vector->rx.ring; 330 rx_ring->vsi = &adapter->vsi; 331 q_vector->rx.ring = rx_ring; 332 q_vector->rx.count++; 333 q_vector->rx.next_update = jiffies + 1; 334 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 335 q_vector->ring_mask |= BIT(r_idx); 336 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 337 q_vector->rx.current_itr >> 1); 338 q_vector->rx.current_itr = q_vector->rx.target_itr; 339 } 340 341 /** 342 * iavf_map_vector_to_txq - associate irqs with tx queues 343 * @adapter: board private structure 344 * @v_idx: interrupt number 345 * @t_idx: queue number 346 **/ 347 static void 348 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 349 { 350 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 351 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 352 struct iavf_hw *hw = &adapter->hw; 353 354 tx_ring->q_vector = q_vector; 355 tx_ring->next = q_vector->tx.ring; 356 tx_ring->vsi = &adapter->vsi; 357 q_vector->tx.ring = tx_ring; 358 q_vector->tx.count++; 359 q_vector->tx.next_update = jiffies + 1; 360 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 361 q_vector->num_ringpairs++; 362 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 363 q_vector->tx.target_itr >> 1); 364 q_vector->tx.current_itr = q_vector->tx.target_itr; 365 } 366 367 /** 368 * iavf_map_rings_to_vectors - Maps 
descriptor rings to vectors 369 * @adapter: board private structure to initialize 370 * 371 * This function maps descriptor rings to the queue-specific vectors 372 * we were allotted through the MSI-X enabling code. Ideally, we'd have 373 * one vector per ring/queue, but on a constrained vector budget, we 374 * group the rings as "efficiently" as possible. You would add new 375 * mapping configurations in here. 376 **/ 377 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 378 { 379 int rings_remaining = adapter->num_active_queues; 380 int ridx = 0, vidx = 0; 381 int q_vectors; 382 383 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 384 385 for (; ridx < rings_remaining; ridx++) { 386 iavf_map_vector_to_rxq(adapter, vidx, ridx); 387 iavf_map_vector_to_txq(adapter, vidx, ridx); 388 389 /* In the case where we have more queues than vectors, continue 390 * round-robin on vectors until all queues are mapped. 391 */ 392 if (++vidx >= q_vectors) 393 vidx = 0; 394 } 395 396 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 397 } 398 399 /** 400 * iavf_irq_affinity_notify - Callback for affinity changes 401 * @notify: context as to what irq was changed 402 * @mask: the new affinity mask 403 * 404 * This is a callback function used by the irq_set_affinity_notifier function 405 * so that we may register to receive changes to the irq affinity masks. 406 **/ 407 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 408 const cpumask_t *mask) 409 { 410 struct iavf_q_vector *q_vector = 411 container_of(notify, struct iavf_q_vector, affinity_notify); 412 413 cpumask_copy(&q_vector->affinity_mask, mask); 414 } 415 416 /** 417 * iavf_irq_affinity_release - Callback for affinity notifier release 418 * @ref: internal core kernel usage 419 * 420 * This is a callback function used by the irq_set_affinity_notifier function 421 * to inform the current notification subscriber that they will no longer 422 * receive notifications. 423 **/ 424 static void iavf_irq_affinity_release(struct kref *ref) {} 425 426 /** 427 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 428 * @adapter: board private structure 429 * @basename: device basename 430 * 431 * Allocates MSI-X vectors for tx and rx handling, and requests 432 * interrupts from the kernel. 
433 **/ 434 static int 435 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 436 { 437 unsigned int vector, q_vectors; 438 unsigned int rx_int_idx = 0, tx_int_idx = 0; 439 int irq_num, err; 440 int cpu; 441 442 iavf_irq_disable(adapter); 443 /* Decrement for Other and TCP Timer vectors */ 444 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 445 446 for (vector = 0; vector < q_vectors; vector++) { 447 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 448 449 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 450 451 if (q_vector->tx.ring && q_vector->rx.ring) { 452 snprintf(q_vector->name, sizeof(q_vector->name), 453 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 454 tx_int_idx++; 455 } else if (q_vector->rx.ring) { 456 snprintf(q_vector->name, sizeof(q_vector->name), 457 "iavf-%s-rx-%d", basename, rx_int_idx++); 458 } else if (q_vector->tx.ring) { 459 snprintf(q_vector->name, sizeof(q_vector->name), 460 "iavf-%s-tx-%d", basename, tx_int_idx++); 461 } else { 462 /* skip this unused q_vector */ 463 continue; 464 } 465 err = request_irq(irq_num, 466 iavf_msix_clean_rings, 467 0, 468 q_vector->name, 469 q_vector); 470 if (err) { 471 dev_info(&adapter->pdev->dev, 472 "Request_irq failed, error: %d\n", err); 473 goto free_queue_irqs; 474 } 475 /* register for affinity change notifications */ 476 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 477 q_vector->affinity_notify.release = 478 iavf_irq_affinity_release; 479 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 480 /* Spread the IRQ affinity hints across online CPUs. Note that 481 * get_cpu_mask returns a mask with a permanent lifetime so 482 * it's safe to use as a hint for irq_set_affinity_hint. 483 */ 484 cpu = cpumask_local_spread(q_vector->v_idx, -1); 485 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 486 } 487 488 return 0; 489 490 free_queue_irqs: 491 while (vector) { 492 vector--; 493 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 494 irq_set_affinity_notifier(irq_num, NULL); 495 irq_set_affinity_hint(irq_num, NULL); 496 free_irq(irq_num, &adapter->q_vectors[vector]); 497 } 498 return err; 499 } 500 501 /** 502 * iavf_request_misc_irq - Initialize MSI-X interrupts 503 * @adapter: board private structure 504 * 505 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 506 * vector is only for the admin queue, and stays active even when the netdev 507 * is closed. 508 **/ 509 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 510 { 511 struct net_device *netdev = adapter->netdev; 512 int err; 513 514 snprintf(adapter->misc_vector_name, 515 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 516 dev_name(&adapter->pdev->dev)); 517 err = request_irq(adapter->msix_entries[0].vector, 518 &iavf_msix_aq, 0, 519 adapter->misc_vector_name, netdev); 520 if (err) { 521 dev_err(&adapter->pdev->dev, 522 "request_irq for %s failed: %d\n", 523 adapter->misc_vector_name, err); 524 free_irq(adapter->msix_entries[0].vector, netdev); 525 } 526 return err; 527 } 528 529 /** 530 * iavf_free_traffic_irqs - Free MSI-X interrupts 531 * @adapter: board private structure 532 * 533 * Frees all MSI-X vectors other than 0. 
534 **/ 535 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 536 { 537 int vector, irq_num, q_vectors; 538 539 if (!adapter->msix_entries) 540 return; 541 542 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 543 544 for (vector = 0; vector < q_vectors; vector++) { 545 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 546 irq_set_affinity_notifier(irq_num, NULL); 547 irq_set_affinity_hint(irq_num, NULL); 548 free_irq(irq_num, &adapter->q_vectors[vector]); 549 } 550 } 551 552 /** 553 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 554 * @adapter: board private structure 555 * 556 * Frees MSI-X vector 0. 557 **/ 558 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 559 { 560 struct net_device *netdev = adapter->netdev; 561 562 if (!adapter->msix_entries) 563 return; 564 565 free_irq(adapter->msix_entries[0].vector, netdev); 566 } 567 568 /** 569 * iavf_configure_tx - Configure Transmit Unit after Reset 570 * @adapter: board private structure 571 * 572 * Configure the Tx unit of the MAC after a reset. 573 **/ 574 static void iavf_configure_tx(struct iavf_adapter *adapter) 575 { 576 struct iavf_hw *hw = &adapter->hw; 577 int i; 578 579 for (i = 0; i < adapter->num_active_queues; i++) 580 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 581 } 582 583 /** 584 * iavf_configure_rx - Configure Receive Unit after Reset 585 * @adapter: board private structure 586 * 587 * Configure the Rx unit of the MAC after a reset. 588 **/ 589 static void iavf_configure_rx(struct iavf_adapter *adapter) 590 { 591 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 592 struct iavf_hw *hw = &adapter->hw; 593 int i; 594 595 /* Legacy Rx will always default to a 2048 buffer size. */ 596 #if (PAGE_SIZE < 8192) 597 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 598 struct net_device *netdev = adapter->netdev; 599 600 /* For jumbo frames on systems with 4K pages we have to use 601 * an order 1 page, so we might as well increase the size 602 * of our Rx buffer to make better use of the available space 603 */ 604 rx_buf_len = IAVF_RXBUFFER_3072; 605 606 /* We use a 1536 buffer size for configurations with 607 * standard Ethernet mtu. On x86 this gives us enough room 608 * for shared info and 192 bytes of padding. 609 */ 610 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 611 (netdev->mtu <= ETH_DATA_LEN)) 612 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 613 } 614 #endif 615 616 for (i = 0; i < adapter->num_active_queues; i++) { 617 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 618 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 619 620 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 621 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 622 else 623 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 624 } 625 } 626 627 /** 628 * iavf_find_vlan - Search filter list for specific vlan filter 629 * @adapter: board private structure 630 * @vlan: vlan tag 631 * 632 * Returns ptr to the filter object or NULL. Must be called while holding the 633 * mac_vlan_list_lock. 634 **/ 635 static struct 636 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 637 { 638 struct iavf_vlan_filter *f; 639 640 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 641 if (vlan == f->vlan) 642 return f; 643 } 644 return NULL; 645 } 646 647 /** 648 * iavf_add_vlan - Add a vlan filter to the list 649 * @adapter: board private structure 650 * @vlan: VLAN tag 651 * 652 * Returns ptr to the filter object or NULL when no memory available. 
653 **/ 654 static struct 655 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 656 { 657 struct iavf_vlan_filter *f = NULL; 658 659 spin_lock_bh(&adapter->mac_vlan_list_lock); 660 661 f = iavf_find_vlan(adapter, vlan); 662 if (!f) { 663 f = kzalloc(sizeof(*f), GFP_ATOMIC); 664 if (!f) 665 goto clearout; 666 667 f->vlan = vlan; 668 669 list_add_tail(&f->list, &adapter->vlan_filter_list); 670 f->add = true; 671 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 672 } 673 674 clearout: 675 spin_unlock_bh(&adapter->mac_vlan_list_lock); 676 return f; 677 } 678 679 /** 680 * iavf_del_vlan - Remove a vlan filter from the list 681 * @adapter: board private structure 682 * @vlan: VLAN tag 683 **/ 684 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 685 { 686 struct iavf_vlan_filter *f; 687 688 spin_lock_bh(&adapter->mac_vlan_list_lock); 689 690 f = iavf_find_vlan(adapter, vlan); 691 if (f) { 692 f->remove = true; 693 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 694 } 695 696 spin_unlock_bh(&adapter->mac_vlan_list_lock); 697 } 698 699 /** 700 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 701 * @netdev: network device struct 702 * @proto: unused protocol data 703 * @vid: VLAN tag 704 **/ 705 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 706 __always_unused __be16 proto, u16 vid) 707 { 708 struct iavf_adapter *adapter = netdev_priv(netdev); 709 710 if (!VLAN_ALLOWED(adapter)) 711 return -EIO; 712 if (iavf_add_vlan(adapter, vid) == NULL) 713 return -ENOMEM; 714 return 0; 715 } 716 717 /** 718 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 719 * @netdev: network device struct 720 * @proto: unused protocol data 721 * @vid: VLAN tag 722 **/ 723 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 724 __always_unused __be16 proto, u16 vid) 725 { 726 struct iavf_adapter *adapter = netdev_priv(netdev); 727 728 if (VLAN_ALLOWED(adapter)) { 729 iavf_del_vlan(adapter, vid); 730 return 0; 731 } 732 return -EIO; 733 } 734 735 /** 736 * iavf_find_filter - Search filter list for specific mac filter 737 * @adapter: board private structure 738 * @macaddr: the MAC address 739 * 740 * Returns ptr to the filter object or NULL. Must be called while holding the 741 * mac_vlan_list_lock. 742 **/ 743 static struct 744 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 745 const u8 *macaddr) 746 { 747 struct iavf_mac_filter *f; 748 749 if (!macaddr) 750 return NULL; 751 752 list_for_each_entry(f, &adapter->mac_filter_list, list) { 753 if (ether_addr_equal(macaddr, f->macaddr)) 754 return f; 755 } 756 return NULL; 757 } 758 759 /** 760 * iavf_add_filter - Add a mac filter to the filter list 761 * @adapter: board private structure 762 * @macaddr: the MAC address 763 * 764 * Returns ptr to the filter object or NULL when no memory available. 
765 **/ 766 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 767 const u8 *macaddr) 768 { 769 struct iavf_mac_filter *f; 770 771 if (!macaddr) 772 return NULL; 773 774 f = iavf_find_filter(adapter, macaddr); 775 if (!f) { 776 f = kzalloc(sizeof(*f), GFP_ATOMIC); 777 if (!f) 778 return f; 779 780 ether_addr_copy(f->macaddr, macaddr); 781 782 list_add_tail(&f->list, &adapter->mac_filter_list); 783 f->add = true; 784 f->is_new_mac = true; 785 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 786 } else { 787 f->remove = false; 788 } 789 790 return f; 791 } 792 793 /** 794 * iavf_set_mac - NDO callback to set port mac address 795 * @netdev: network interface device structure 796 * @p: pointer to an address structure 797 * 798 * Returns 0 on success, negative on failure 799 **/ 800 static int iavf_set_mac(struct net_device *netdev, void *p) 801 { 802 struct iavf_adapter *adapter = netdev_priv(netdev); 803 struct iavf_hw *hw = &adapter->hw; 804 struct iavf_mac_filter *f; 805 struct sockaddr *addr = p; 806 807 if (!is_valid_ether_addr(addr->sa_data)) 808 return -EADDRNOTAVAIL; 809 810 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 811 return 0; 812 813 spin_lock_bh(&adapter->mac_vlan_list_lock); 814 815 f = iavf_find_filter(adapter, hw->mac.addr); 816 if (f) { 817 f->remove = true; 818 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 819 } 820 821 f = iavf_add_filter(adapter, addr->sa_data); 822 823 spin_unlock_bh(&adapter->mac_vlan_list_lock); 824 825 if (f) { 826 ether_addr_copy(hw->mac.addr, addr->sa_data); 827 } 828 829 return (f == NULL) ? -ENOMEM : 0; 830 } 831 832 /** 833 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 834 * @netdev: the netdevice 835 * @addr: address to add 836 * 837 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 838 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 839 */ 840 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 841 { 842 struct iavf_adapter *adapter = netdev_priv(netdev); 843 844 if (iavf_add_filter(adapter, addr)) 845 return 0; 846 else 847 return -ENOMEM; 848 } 849 850 /** 851 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 852 * @netdev: the netdevice 853 * @addr: address to add 854 * 855 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 856 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 857 */ 858 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 859 { 860 struct iavf_adapter *adapter = netdev_priv(netdev); 861 struct iavf_mac_filter *f; 862 863 /* Under some circumstances, we might receive a request to delete 864 * our own device address from our uc list. Because we store the 865 * device address in the VSI's MAC/VLAN filter list, we need to ignore 866 * such requests and not delete our device address from this list. 
867 */ 868 if (ether_addr_equal(addr, netdev->dev_addr)) 869 return 0; 870 871 f = iavf_find_filter(adapter, addr); 872 if (f) { 873 f->remove = true; 874 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 875 } 876 return 0; 877 } 878 879 /** 880 * iavf_set_rx_mode - NDO callback to set the netdev filters 881 * @netdev: network interface device structure 882 **/ 883 static void iavf_set_rx_mode(struct net_device *netdev) 884 { 885 struct iavf_adapter *adapter = netdev_priv(netdev); 886 887 spin_lock_bh(&adapter->mac_vlan_list_lock); 888 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 889 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 890 spin_unlock_bh(&adapter->mac_vlan_list_lock); 891 892 if (netdev->flags & IFF_PROMISC && 893 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 894 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 895 else if (!(netdev->flags & IFF_PROMISC) && 896 adapter->flags & IAVF_FLAG_PROMISC_ON) 897 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 898 899 if (netdev->flags & IFF_ALLMULTI && 900 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 901 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 902 else if (!(netdev->flags & IFF_ALLMULTI) && 903 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 904 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 905 } 906 907 /** 908 * iavf_napi_enable_all - enable NAPI on all queue vectors 909 * @adapter: board private structure 910 **/ 911 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 912 { 913 int q_idx; 914 struct iavf_q_vector *q_vector; 915 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 916 917 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 918 struct napi_struct *napi; 919 920 q_vector = &adapter->q_vectors[q_idx]; 921 napi = &q_vector->napi; 922 napi_enable(napi); 923 } 924 } 925 926 /** 927 * iavf_napi_disable_all - disable NAPI on all queue vectors 928 * @adapter: board private structure 929 **/ 930 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 931 { 932 int q_idx; 933 struct iavf_q_vector *q_vector; 934 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 935 936 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 937 q_vector = &adapter->q_vectors[q_idx]; 938 napi_disable(&q_vector->napi); 939 } 940 } 941 942 /** 943 * iavf_configure - set up transmit and receive data structures 944 * @adapter: board private structure 945 **/ 946 static void iavf_configure(struct iavf_adapter *adapter) 947 { 948 struct net_device *netdev = adapter->netdev; 949 int i; 950 951 iavf_set_rx_mode(netdev); 952 953 iavf_configure_tx(adapter); 954 iavf_configure_rx(adapter); 955 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 956 957 for (i = 0; i < adapter->num_active_queues; i++) { 958 struct iavf_ring *ring = &adapter->rx_rings[i]; 959 960 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 961 } 962 } 963 964 /** 965 * iavf_up_complete - Finish the last steps of bringing up a connection 966 * @adapter: board private structure 967 * 968 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
969 **/ 970 static void iavf_up_complete(struct iavf_adapter *adapter) 971 { 972 iavf_change_state(adapter, __IAVF_RUNNING); 973 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 974 975 iavf_napi_enable_all(adapter); 976 977 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 978 if (CLIENT_ENABLED(adapter)) 979 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 980 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 981 } 982 983 /** 984 * iavf_down - Shutdown the connection processing 985 * @adapter: board private structure 986 * 987 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 988 **/ 989 void iavf_down(struct iavf_adapter *adapter) 990 { 991 struct net_device *netdev = adapter->netdev; 992 struct iavf_vlan_filter *vlf; 993 struct iavf_cloud_filter *cf; 994 struct iavf_fdir_fltr *fdir; 995 struct iavf_mac_filter *f; 996 struct iavf_adv_rss *rss; 997 998 if (adapter->state <= __IAVF_DOWN_PENDING) 999 return; 1000 1001 netif_carrier_off(netdev); 1002 netif_tx_disable(netdev); 1003 adapter->link_up = false; 1004 iavf_napi_disable_all(adapter); 1005 iavf_irq_disable(adapter); 1006 1007 spin_lock_bh(&adapter->mac_vlan_list_lock); 1008 1009 /* clear the sync flag on all filters */ 1010 __dev_uc_unsync(adapter->netdev, NULL); 1011 __dev_mc_unsync(adapter->netdev, NULL); 1012 1013 /* remove all MAC filters */ 1014 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1015 f->remove = true; 1016 } 1017 1018 /* remove all VLAN filters */ 1019 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1020 vlf->remove = true; 1021 } 1022 1023 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1024 1025 /* remove all cloud filters */ 1026 spin_lock_bh(&adapter->cloud_filter_list_lock); 1027 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1028 cf->del = true; 1029 } 1030 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1031 1032 /* remove all Flow Director filters */ 1033 spin_lock_bh(&adapter->fdir_fltr_lock); 1034 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1035 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1036 } 1037 spin_unlock_bh(&adapter->fdir_fltr_lock); 1038 1039 /* remove all advance RSS configuration */ 1040 spin_lock_bh(&adapter->adv_rss_lock); 1041 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1042 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1043 spin_unlock_bh(&adapter->adv_rss_lock); 1044 1045 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1046 adapter->state != __IAVF_RESETTING) { 1047 /* cancel any current operation */ 1048 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1049 /* Schedule operations to close down the HW. Don't wait 1050 * here for this to complete. The watchdog is still running 1051 * and it will take care of this. 1052 */ 1053 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1054 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1055 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1056 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1057 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1058 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1059 } 1060 1061 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1062 } 1063 1064 /** 1065 * iavf_acquire_msix_vectors - Setup the MSIX capability 1066 * @adapter: board private structure 1067 * @vectors: number of vectors to request 1068 * 1069 * Work with the OS to set up the MSIX vectors needed. 
1070 * 1071 * Returns 0 on success, negative on failure 1072 **/ 1073 static int 1074 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1075 { 1076 int err, vector_threshold; 1077 1078 /* We'll want at least 3 (vector_threshold): 1079 * 0) Other (Admin Queue and link, mostly) 1080 * 1) TxQ[0] Cleanup 1081 * 2) RxQ[0] Cleanup 1082 */ 1083 vector_threshold = MIN_MSIX_COUNT; 1084 1085 /* The more we get, the more we will assign to Tx/Rx Cleanup 1086 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1087 * Right now, we simply care about how many we'll get; we'll 1088 * set them up later while requesting irq's. 1089 */ 1090 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1091 vector_threshold, vectors); 1092 if (err < 0) { 1093 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1094 kfree(adapter->msix_entries); 1095 adapter->msix_entries = NULL; 1096 return err; 1097 } 1098 1099 /* Adjust for only the vectors we'll use, which is minimum 1100 * of max_msix_q_vectors + NONQ_VECS, or the number of 1101 * vectors we were allocated. 1102 */ 1103 adapter->num_msix_vectors = err; 1104 return 0; 1105 } 1106 1107 /** 1108 * iavf_free_queues - Free memory for all rings 1109 * @adapter: board private structure to initialize 1110 * 1111 * Free all of the memory associated with queue pairs. 1112 **/ 1113 static void iavf_free_queues(struct iavf_adapter *adapter) 1114 { 1115 if (!adapter->vsi_res) 1116 return; 1117 adapter->num_active_queues = 0; 1118 kfree(adapter->tx_rings); 1119 adapter->tx_rings = NULL; 1120 kfree(adapter->rx_rings); 1121 adapter->rx_rings = NULL; 1122 } 1123 1124 /** 1125 * iavf_alloc_queues - Allocate memory for all rings 1126 * @adapter: board private structure to initialize 1127 * 1128 * We allocate one ring per queue at run-time since we don't know the 1129 * number of queues at compile-time. The polling_netdev array is 1130 * intended for Multiqueue, but should work fine with a single queue. 1131 **/ 1132 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1133 { 1134 int i, num_active_queues; 1135 1136 /* If we're in reset reallocating queues we don't actually know yet for 1137 * certain the PF gave us the number of queues we asked for but we'll 1138 * assume it did. Once basic reset is finished we'll confirm once we 1139 * start negotiating config with PF. 
1140 */ 1141 if (adapter->num_req_queues) 1142 num_active_queues = adapter->num_req_queues; 1143 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1144 adapter->num_tc) 1145 num_active_queues = adapter->ch_config.total_qps; 1146 else 1147 num_active_queues = min_t(int, 1148 adapter->vsi_res->num_queue_pairs, 1149 (int)(num_online_cpus())); 1150 1151 1152 adapter->tx_rings = kcalloc(num_active_queues, 1153 sizeof(struct iavf_ring), GFP_KERNEL); 1154 if (!adapter->tx_rings) 1155 goto err_out; 1156 adapter->rx_rings = kcalloc(num_active_queues, 1157 sizeof(struct iavf_ring), GFP_KERNEL); 1158 if (!adapter->rx_rings) 1159 goto err_out; 1160 1161 for (i = 0; i < num_active_queues; i++) { 1162 struct iavf_ring *tx_ring; 1163 struct iavf_ring *rx_ring; 1164 1165 tx_ring = &adapter->tx_rings[i]; 1166 1167 tx_ring->queue_index = i; 1168 tx_ring->netdev = adapter->netdev; 1169 tx_ring->dev = &adapter->pdev->dev; 1170 tx_ring->count = adapter->tx_desc_count; 1171 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1172 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1173 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1174 1175 rx_ring = &adapter->rx_rings[i]; 1176 rx_ring->queue_index = i; 1177 rx_ring->netdev = adapter->netdev; 1178 rx_ring->dev = &adapter->pdev->dev; 1179 rx_ring->count = adapter->rx_desc_count; 1180 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1181 } 1182 1183 adapter->num_active_queues = num_active_queues; 1184 1185 return 0; 1186 1187 err_out: 1188 iavf_free_queues(adapter); 1189 return -ENOMEM; 1190 } 1191 1192 /** 1193 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1194 * @adapter: board private structure to initialize 1195 * 1196 * Attempt to configure the interrupts using the best available 1197 * capabilities of the hardware and the kernel. 1198 **/ 1199 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1200 { 1201 int vector, v_budget; 1202 int pairs = 0; 1203 int err = 0; 1204 1205 if (!adapter->vsi_res) { 1206 err = -EIO; 1207 goto out; 1208 } 1209 pairs = adapter->num_active_queues; 1210 1211 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1212 * us much good if we have more vectors than CPUs. However, we already 1213 * limit the total number of queues by the number of CPUs so we do not 1214 * need any further limiting here. 
1215 */ 1216 v_budget = min_t(int, pairs + NONQ_VECS, 1217 (int)adapter->vf_res->max_vectors); 1218 1219 adapter->msix_entries = kcalloc(v_budget, 1220 sizeof(struct msix_entry), GFP_KERNEL); 1221 if (!adapter->msix_entries) { 1222 err = -ENOMEM; 1223 goto out; 1224 } 1225 1226 for (vector = 0; vector < v_budget; vector++) 1227 adapter->msix_entries[vector].entry = vector; 1228 1229 err = iavf_acquire_msix_vectors(adapter, v_budget); 1230 1231 out: 1232 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1233 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1234 return err; 1235 } 1236 1237 /** 1238 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1239 * @adapter: board private structure 1240 * 1241 * Return 0 on success, negative on failure 1242 **/ 1243 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1244 { 1245 struct iavf_aqc_get_set_rss_key_data *rss_key = 1246 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1247 struct iavf_hw *hw = &adapter->hw; 1248 int ret = 0; 1249 1250 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1251 /* bail because we already have a command pending */ 1252 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1253 adapter->current_op); 1254 return -EBUSY; 1255 } 1256 1257 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1258 if (ret) { 1259 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1260 iavf_stat_str(hw, ret), 1261 iavf_aq_str(hw, hw->aq.asq_last_status)); 1262 return ret; 1263 1264 } 1265 1266 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1267 adapter->rss_lut, adapter->rss_lut_size); 1268 if (ret) { 1269 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1270 iavf_stat_str(hw, ret), 1271 iavf_aq_str(hw, hw->aq.asq_last_status)); 1272 } 1273 1274 return ret; 1275 1276 } 1277 1278 /** 1279 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1280 * @adapter: board private structure 1281 * 1282 * Returns 0 on success, negative on failure 1283 **/ 1284 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1285 { 1286 struct iavf_hw *hw = &adapter->hw; 1287 u32 *dw; 1288 u16 i; 1289 1290 dw = (u32 *)adapter->rss_key; 1291 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1292 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1293 1294 dw = (u32 *)adapter->rss_lut; 1295 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1296 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1297 1298 iavf_flush(hw); 1299 1300 return 0; 1301 } 1302 1303 /** 1304 * iavf_config_rss - Configure RSS keys and lut 1305 * @adapter: board private structure 1306 * 1307 * Returns 0 on success, negative on failure 1308 **/ 1309 int iavf_config_rss(struct iavf_adapter *adapter) 1310 { 1311 1312 if (RSS_PF(adapter)) { 1313 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1314 IAVF_FLAG_AQ_SET_RSS_KEY; 1315 return 0; 1316 } else if (RSS_AQ(adapter)) { 1317 return iavf_config_rss_aq(adapter); 1318 } else { 1319 return iavf_config_rss_reg(adapter); 1320 } 1321 } 1322 1323 /** 1324 * iavf_fill_rss_lut - Fill the lut with default values 1325 * @adapter: board private structure 1326 **/ 1327 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1328 { 1329 u16 i; 1330 1331 for (i = 0; i < adapter->rss_lut_size; i++) 1332 adapter->rss_lut[i] = i % adapter->num_active_queues; 1333 } 1334 1335 /** 1336 * iavf_init_rss - Prepare for RSS 1337 * @adapter: board private structure 1338 * 1339 * Return 0 on success, negative on failure 1340 **/ 1341 static int 
iavf_init_rss(struct iavf_adapter *adapter) 1342 { 1343 struct iavf_hw *hw = &adapter->hw; 1344 int ret; 1345 1346 if (!RSS_PF(adapter)) { 1347 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1348 if (adapter->vf_res->vf_cap_flags & 1349 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1350 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1351 else 1352 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1353 1354 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1355 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1356 } 1357 1358 iavf_fill_rss_lut(adapter); 1359 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1360 ret = iavf_config_rss(adapter); 1361 1362 return ret; 1363 } 1364 1365 /** 1366 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1367 * @adapter: board private structure to initialize 1368 * 1369 * We allocate one q_vector per queue interrupt. If allocation fails we 1370 * return -ENOMEM. 1371 **/ 1372 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1373 { 1374 int q_idx = 0, num_q_vectors; 1375 struct iavf_q_vector *q_vector; 1376 1377 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1378 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1379 GFP_KERNEL); 1380 if (!adapter->q_vectors) 1381 return -ENOMEM; 1382 1383 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1384 q_vector = &adapter->q_vectors[q_idx]; 1385 q_vector->adapter = adapter; 1386 q_vector->vsi = &adapter->vsi; 1387 q_vector->v_idx = q_idx; 1388 q_vector->reg_idx = q_idx; 1389 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1390 netif_napi_add(adapter->netdev, &q_vector->napi, 1391 iavf_napi_poll, NAPI_POLL_WEIGHT); 1392 } 1393 1394 return 0; 1395 } 1396 1397 /** 1398 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1399 * @adapter: board private structure to initialize 1400 * 1401 * This function frees the memory allocated to the q_vectors. In addition if 1402 * NAPI is enabled it will delete any references to the NAPI struct prior 1403 * to freeing the q_vector. 
1404 **/ 1405 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1406 { 1407 int q_idx, num_q_vectors; 1408 int napi_vectors; 1409 1410 if (!adapter->q_vectors) 1411 return; 1412 1413 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1414 napi_vectors = adapter->num_active_queues; 1415 1416 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1417 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1418 1419 if (q_idx < napi_vectors) 1420 netif_napi_del(&q_vector->napi); 1421 } 1422 kfree(adapter->q_vectors); 1423 adapter->q_vectors = NULL; 1424 } 1425 1426 /** 1427 * iavf_reset_interrupt_capability - Reset MSIX setup 1428 * @adapter: board private structure 1429 * 1430 **/ 1431 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1432 { 1433 if (!adapter->msix_entries) 1434 return; 1435 1436 pci_disable_msix(adapter->pdev); 1437 kfree(adapter->msix_entries); 1438 adapter->msix_entries = NULL; 1439 } 1440 1441 /** 1442 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1443 * @adapter: board private structure to initialize 1444 * 1445 **/ 1446 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1447 { 1448 int err; 1449 1450 err = iavf_alloc_queues(adapter); 1451 if (err) { 1452 dev_err(&adapter->pdev->dev, 1453 "Unable to allocate memory for queues\n"); 1454 goto err_alloc_queues; 1455 } 1456 1457 rtnl_lock(); 1458 err = iavf_set_interrupt_capability(adapter); 1459 rtnl_unlock(); 1460 if (err) { 1461 dev_err(&adapter->pdev->dev, 1462 "Unable to setup interrupt capabilities\n"); 1463 goto err_set_interrupt; 1464 } 1465 1466 err = iavf_alloc_q_vectors(adapter); 1467 if (err) { 1468 dev_err(&adapter->pdev->dev, 1469 "Unable to allocate memory for queue vectors\n"); 1470 goto err_alloc_q_vectors; 1471 } 1472 1473 /* If we've made it so far while ADq flag being ON, then we haven't 1474 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1475 * resources have been allocated in the reset path. 1476 * Now we can truly claim that ADq is enabled. 1477 */ 1478 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1479 adapter->num_tc) 1480 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1481 adapter->num_tc); 1482 1483 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1484 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1485 adapter->num_active_queues); 1486 1487 return 0; 1488 err_alloc_q_vectors: 1489 iavf_reset_interrupt_capability(adapter); 1490 err_set_interrupt: 1491 iavf_free_queues(adapter); 1492 err_alloc_queues: 1493 return err; 1494 } 1495 1496 /** 1497 * iavf_free_rss - Free memory used by RSS structs 1498 * @adapter: board private structure 1499 **/ 1500 static void iavf_free_rss(struct iavf_adapter *adapter) 1501 { 1502 kfree(adapter->rss_key); 1503 adapter->rss_key = NULL; 1504 1505 kfree(adapter->rss_lut); 1506 adapter->rss_lut = NULL; 1507 } 1508 1509 /** 1510 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1511 * @adapter: board private structure 1512 * 1513 * Returns 0 on success, negative on failure 1514 **/ 1515 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1516 { 1517 struct net_device *netdev = adapter->netdev; 1518 int err; 1519 1520 if (netif_running(netdev)) 1521 iavf_free_traffic_irqs(adapter); 1522 iavf_free_misc_irq(adapter); 1523 iavf_reset_interrupt_capability(adapter); 1524 iavf_free_q_vectors(adapter); 1525 iavf_free_queues(adapter); 1526 1527 err = iavf_init_interrupt_scheme(adapter); 1528 if (err) 1529 goto err; 1530 1531 netif_tx_stop_all_queues(netdev); 1532 1533 err = iavf_request_misc_irq(adapter); 1534 if (err) 1535 goto err; 1536 1537 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1538 1539 iavf_map_rings_to_vectors(adapter); 1540 err: 1541 return err; 1542 } 1543 1544 /** 1545 * iavf_process_aq_command - process aq_required flags 1546 * and sends aq command 1547 * @adapter: pointer to iavf adapter structure 1548 * 1549 * Returns 0 on success 1550 * Returns error code if no command was sent 1551 * or error code if the command failed. 1552 **/ 1553 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1554 { 1555 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1556 return iavf_send_vf_config_msg(adapter); 1557 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1558 iavf_disable_queues(adapter); 1559 return 0; 1560 } 1561 1562 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1563 iavf_map_queues(adapter); 1564 return 0; 1565 } 1566 1567 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1568 iavf_add_ether_addrs(adapter); 1569 return 0; 1570 } 1571 1572 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1573 iavf_add_vlans(adapter); 1574 return 0; 1575 } 1576 1577 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1578 iavf_del_ether_addrs(adapter); 1579 return 0; 1580 } 1581 1582 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1583 iavf_del_vlans(adapter); 1584 return 0; 1585 } 1586 1587 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1588 iavf_enable_vlan_stripping(adapter); 1589 return 0; 1590 } 1591 1592 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1593 iavf_disable_vlan_stripping(adapter); 1594 return 0; 1595 } 1596 1597 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1598 iavf_configure_queues(adapter); 1599 return 0; 1600 } 1601 1602 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1603 iavf_enable_queues(adapter); 1604 return 0; 1605 } 1606 1607 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1608 /* This message goes straight to the firmware, not the 1609 * PF, so we don't have to set current_op as we will 1610 * not get a response through the ARQ. 
1611 */ 1612 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 1613 return 0; 1614 } 1615 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 1616 iavf_get_hena(adapter); 1617 return 0; 1618 } 1619 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 1620 iavf_set_hena(adapter); 1621 return 0; 1622 } 1623 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 1624 iavf_set_rss_key(adapter); 1625 return 0; 1626 } 1627 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 1628 iavf_set_rss_lut(adapter); 1629 return 0; 1630 } 1631 1632 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { 1633 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | 1634 FLAG_VF_MULTICAST_PROMISC); 1635 return 0; 1636 } 1637 1638 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { 1639 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); 1640 return 0; 1641 } 1642 1643 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && 1644 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { 1645 iavf_set_promiscuous(adapter, 0); 1646 return 0; 1647 } 1648 1649 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 1650 iavf_enable_channels(adapter); 1651 return 0; 1652 } 1653 1654 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 1655 iavf_disable_channels(adapter); 1656 return 0; 1657 } 1658 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1659 iavf_add_cloud_filter(adapter); 1660 return 0; 1661 } 1662 1663 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1664 iavf_del_cloud_filter(adapter); 1665 return 0; 1666 } 1667 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1668 iavf_del_cloud_filter(adapter); 1669 return 0; 1670 } 1671 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1672 iavf_add_cloud_filter(adapter); 1673 return 0; 1674 } 1675 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { 1676 iavf_add_fdir_filter(adapter); 1677 return IAVF_SUCCESS; 1678 } 1679 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { 1680 iavf_del_fdir_filter(adapter); 1681 return IAVF_SUCCESS; 1682 } 1683 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { 1684 iavf_add_adv_rss_cfg(adapter); 1685 return 0; 1686 } 1687 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { 1688 iavf_del_adv_rss_cfg(adapter); 1689 return 0; 1690 } 1691 return -EAGAIN; 1692 } 1693 1694 /** 1695 * iavf_startup - first step of driver startup 1696 * @adapter: board private structure 1697 * 1698 * Function process __IAVF_STARTUP driver state. 
1699 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1700 * when fails the state is changed to __IAVF_INIT_FAILED 1701 **/ 1702 static void iavf_startup(struct iavf_adapter *adapter) 1703 { 1704 struct pci_dev *pdev = adapter->pdev; 1705 struct iavf_hw *hw = &adapter->hw; 1706 int err; 1707 1708 WARN_ON(adapter->state != __IAVF_STARTUP); 1709 1710 /* driver loaded, probe complete */ 1711 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1712 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1713 err = iavf_set_mac_type(hw); 1714 if (err) { 1715 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1716 goto err; 1717 } 1718 1719 err = iavf_check_reset_complete(hw); 1720 if (err) { 1721 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1722 err); 1723 goto err; 1724 } 1725 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1726 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1727 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1728 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1729 1730 err = iavf_init_adminq(hw); 1731 if (err) { 1732 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1733 goto err; 1734 } 1735 err = iavf_send_api_ver(adapter); 1736 if (err) { 1737 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1738 iavf_shutdown_adminq(hw); 1739 goto err; 1740 } 1741 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 1742 return; 1743 err: 1744 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1745 } 1746 1747 /** 1748 * iavf_init_version_check - second step of driver startup 1749 * @adapter: board private structure 1750 * 1751 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1752 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1753 * when fails the state is changed to __IAVF_INIT_FAILED 1754 **/ 1755 static void iavf_init_version_check(struct iavf_adapter *adapter) 1756 { 1757 struct pci_dev *pdev = adapter->pdev; 1758 struct iavf_hw *hw = &adapter->hw; 1759 int err = -EAGAIN; 1760 1761 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1762 1763 if (!iavf_asq_done(hw)) { 1764 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1765 iavf_shutdown_adminq(hw); 1766 iavf_change_state(adapter, __IAVF_STARTUP); 1767 goto err; 1768 } 1769 1770 /* aq msg sent, awaiting reply */ 1771 err = iavf_verify_api_ver(adapter); 1772 if (err) { 1773 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1774 err = iavf_send_api_ver(adapter); 1775 else 1776 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1777 adapter->pf_version.major, 1778 adapter->pf_version.minor, 1779 VIRTCHNL_VERSION_MAJOR, 1780 VIRTCHNL_VERSION_MINOR); 1781 goto err; 1782 } 1783 err = iavf_send_vf_config_msg(adapter); 1784 if (err) { 1785 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1786 err); 1787 goto err; 1788 } 1789 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 1790 return; 1791 err: 1792 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1793 } 1794 1795 /** 1796 * iavf_init_get_resources - third step of driver startup 1797 * @adapter: board private structure 1798 * 1799 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1800 * finishes driver initialization procedure. 
1801 * When success the state is changed to __IAVF_DOWN 1802 * when fails the state is changed to __IAVF_INIT_FAILED 1803 **/ 1804 static void iavf_init_get_resources(struct iavf_adapter *adapter) 1805 { 1806 struct net_device *netdev = adapter->netdev; 1807 struct pci_dev *pdev = adapter->pdev; 1808 struct iavf_hw *hw = &adapter->hw; 1809 int err; 1810 1811 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1812 /* aq msg sent, awaiting reply */ 1813 if (!adapter->vf_res) { 1814 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1815 GFP_KERNEL); 1816 if (!adapter->vf_res) { 1817 err = -ENOMEM; 1818 goto err; 1819 } 1820 } 1821 err = iavf_get_vf_config(adapter); 1822 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1823 err = iavf_send_vf_config_msg(adapter); 1824 goto err; 1825 } else if (err == IAVF_ERR_PARAM) { 1826 /* We only get ERR_PARAM if the device is in a very bad 1827 * state or if we've been disabled for previous bad 1828 * behavior. Either way, we're done now. 1829 */ 1830 iavf_shutdown_adminq(hw); 1831 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1832 return; 1833 } 1834 if (err) { 1835 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1836 goto err_alloc; 1837 } 1838 1839 err = iavf_process_config(adapter); 1840 if (err) 1841 goto err_alloc; 1842 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1843 1844 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1845 1846 netdev->netdev_ops = &iavf_netdev_ops; 1847 iavf_set_ethtool_ops(netdev); 1848 netdev->watchdog_timeo = 5 * HZ; 1849 1850 /* MTU range: 68 - 9710 */ 1851 netdev->min_mtu = ETH_MIN_MTU; 1852 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1853 1854 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1855 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1856 adapter->hw.mac.addr); 1857 eth_hw_addr_random(netdev); 1858 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1859 } else { 1860 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1861 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1862 } 1863 1864 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1865 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1866 err = iavf_init_interrupt_scheme(adapter); 1867 if (err) 1868 goto err_sw_init; 1869 iavf_map_rings_to_vectors(adapter); 1870 if (adapter->vf_res->vf_cap_flags & 1871 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1872 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1873 1874 err = iavf_request_misc_irq(adapter); 1875 if (err) 1876 goto err_sw_init; 1877 1878 netif_carrier_off(netdev); 1879 adapter->link_up = false; 1880 1881 /* set the semaphore to prevent any callbacks after device registration 1882 * up to time when state of driver will be set to __IAVF_DOWN 1883 */ 1884 rtnl_lock(); 1885 if (!adapter->netdev_registered) { 1886 err = register_netdevice(netdev); 1887 if (err) { 1888 rtnl_unlock(); 1889 goto err_register; 1890 } 1891 } 1892 1893 adapter->netdev_registered = true; 1894 1895 netif_tx_stop_all_queues(netdev); 1896 if (CLIENT_ALLOWED(adapter)) { 1897 err = iavf_lan_add_device(adapter); 1898 if (err) 1899 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1900 err); 1901 } 1902 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1903 if (netdev->features & NETIF_F_GRO) 1904 dev_info(&pdev->dev, "GRO is enabled\n"); 1905 1906 iavf_change_state(adapter, __IAVF_DOWN); 1907 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1908 rtnl_unlock(); 1909 1910 iavf_misc_irq_enable(adapter); 1911 
wake_up(&adapter->down_waitqueue); 1912 1913 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1914 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1915 if (!adapter->rss_key || !adapter->rss_lut) { 1916 err = -ENOMEM; 1917 goto err_mem; 1918 } 1919 if (RSS_AQ(adapter)) 1920 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1921 else 1922 iavf_init_rss(adapter); 1923 1924 return; 1925 err_mem: 1926 iavf_free_rss(adapter); 1927 err_register: 1928 iavf_free_misc_irq(adapter); 1929 err_sw_init: 1930 iavf_reset_interrupt_capability(adapter); 1931 err_alloc: 1932 kfree(adapter->vf_res); 1933 adapter->vf_res = NULL; 1934 err: 1935 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1936 } 1937 1938 /** 1939 * iavf_watchdog_task - Periodic call-back task 1940 * @work: pointer to work_struct 1941 **/ 1942 static void iavf_watchdog_task(struct work_struct *work) 1943 { 1944 struct iavf_adapter *adapter = container_of(work, 1945 struct iavf_adapter, 1946 watchdog_task.work); 1947 struct iavf_hw *hw = &adapter->hw; 1948 u32 reg_val; 1949 1950 if (!mutex_trylock(&adapter->crit_lock)) 1951 goto restart_watchdog; 1952 1953 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1954 iavf_change_state(adapter, __IAVF_COMM_FAILED); 1955 1956 if (adapter->flags & IAVF_FLAG_RESET_NEEDED && 1957 adapter->state != __IAVF_RESETTING) { 1958 iavf_change_state(adapter, __IAVF_RESETTING); 1959 adapter->aq_required = 0; 1960 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1961 } 1962 1963 switch (adapter->state) { 1964 case __IAVF_STARTUP: 1965 iavf_startup(adapter); 1966 mutex_unlock(&adapter->crit_lock); 1967 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1968 msecs_to_jiffies(30)); 1969 return; 1970 case __IAVF_INIT_VERSION_CHECK: 1971 iavf_init_version_check(adapter); 1972 mutex_unlock(&adapter->crit_lock); 1973 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1974 msecs_to_jiffies(30)); 1975 return; 1976 case __IAVF_INIT_GET_RESOURCES: 1977 iavf_init_get_resources(adapter); 1978 mutex_unlock(&adapter->crit_lock); 1979 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1980 msecs_to_jiffies(1)); 1981 return; 1982 case __IAVF_INIT_FAILED: 1983 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 1984 dev_err(&adapter->pdev->dev, 1985 "Failed to communicate with PF; waiting before retry\n"); 1986 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 1987 iavf_shutdown_adminq(hw); 1988 mutex_unlock(&adapter->crit_lock); 1989 queue_delayed_work(iavf_wq, 1990 &adapter->watchdog_task, (5 * HZ)); 1991 return; 1992 } 1993 /* Try again from failed step*/ 1994 iavf_change_state(adapter, adapter->last_state); 1995 mutex_unlock(&adapter->crit_lock); 1996 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 1997 return; 1998 case __IAVF_COMM_FAILED: 1999 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2000 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2001 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2002 reg_val == VIRTCHNL_VFR_COMPLETED) { 2003 /* A chance for redemption! */ 2004 dev_err(&adapter->pdev->dev, 2005 "Hardware came out of reset. Attempting reinit.\n"); 2006 /* When init task contacts the PF and 2007 * gets everything set up again, it'll restart the 2008 * watchdog for us. Down, boy. Sit. Stay. Woof. 
2009 */ 2010 iavf_change_state(adapter, __IAVF_STARTUP); 2011 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2012 } 2013 adapter->aq_required = 0; 2014 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2015 queue_delayed_work(iavf_wq, 2016 &adapter->watchdog_task, 2017 msecs_to_jiffies(10)); 2018 return; 2019 case __IAVF_RESETTING: 2020 mutex_unlock(&adapter->crit_lock); 2021 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2022 return; 2023 case __IAVF_DOWN: 2024 case __IAVF_DOWN_PENDING: 2025 case __IAVF_TESTING: 2026 case __IAVF_RUNNING: 2027 if (adapter->current_op) { 2028 if (!iavf_asq_done(hw)) { 2029 dev_dbg(&adapter->pdev->dev, 2030 "Admin queue timeout\n"); 2031 iavf_send_api_ver(adapter); 2032 } 2033 } else { 2034 /* An error will be returned if no commands were 2035 * processed; use this opportunity to update stats 2036 */ 2037 if (iavf_process_aq_command(adapter) && 2038 adapter->state == __IAVF_RUNNING) 2039 iavf_request_stats(adapter); 2040 } 2041 if (adapter->state == __IAVF_RUNNING) 2042 iavf_detect_recover_hung(&adapter->vsi); 2043 break; 2044 case __IAVF_REMOVE: 2045 mutex_unlock(&adapter->crit_lock); 2046 return; 2047 default: 2048 return; 2049 } 2050 2051 /* check for hw reset */ 2052 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2053 if (!reg_val) { 2054 iavf_change_state(adapter, __IAVF_RESETTING); 2055 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2056 adapter->aq_required = 0; 2057 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2058 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2059 queue_work(iavf_wq, &adapter->reset_task); 2060 mutex_unlock(&adapter->crit_lock); 2061 queue_delayed_work(iavf_wq, 2062 &adapter->watchdog_task, HZ * 2); 2063 return; 2064 } 2065 2066 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2067 mutex_unlock(&adapter->crit_lock); 2068 restart_watchdog: 2069 queue_work(iavf_wq, &adapter->adminq_task); 2070 if (adapter->aq_required) 2071 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2072 msecs_to_jiffies(20)); 2073 else 2074 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2075 } 2076 2077 static void iavf_disable_vf(struct iavf_adapter *adapter) 2078 { 2079 struct iavf_mac_filter *f, *ftmp; 2080 struct iavf_vlan_filter *fv, *fvtmp; 2081 struct iavf_cloud_filter *cf, *cftmp; 2082 2083 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2084 2085 /* We don't use netif_running() because it may be true prior to 2086 * ndo_open() returning, so we can't assume it means all our open 2087 * tasks have finished, since we're not holding the rtnl_lock here. 
2088 */ 2089 if (adapter->state == __IAVF_RUNNING) { 2090 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2091 netif_carrier_off(adapter->netdev); 2092 netif_tx_disable(adapter->netdev); 2093 adapter->link_up = false; 2094 iavf_napi_disable_all(adapter); 2095 iavf_irq_disable(adapter); 2096 iavf_free_traffic_irqs(adapter); 2097 iavf_free_all_tx_resources(adapter); 2098 iavf_free_all_rx_resources(adapter); 2099 } 2100 2101 spin_lock_bh(&adapter->mac_vlan_list_lock); 2102 2103 /* Delete all of the filters */ 2104 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2105 list_del(&f->list); 2106 kfree(f); 2107 } 2108 2109 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2110 list_del(&fv->list); 2111 kfree(fv); 2112 } 2113 2114 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2115 2116 spin_lock_bh(&adapter->cloud_filter_list_lock); 2117 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2118 list_del(&cf->list); 2119 kfree(cf); 2120 adapter->num_cloud_filters--; 2121 } 2122 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2123 2124 iavf_free_misc_irq(adapter); 2125 iavf_reset_interrupt_capability(adapter); 2126 iavf_free_queues(adapter); 2127 iavf_free_q_vectors(adapter); 2128 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2129 iavf_shutdown_adminq(&adapter->hw); 2130 adapter->netdev->flags &= ~IFF_UP; 2131 mutex_unlock(&adapter->crit_lock); 2132 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2133 iavf_change_state(adapter, __IAVF_DOWN); 2134 wake_up(&adapter->down_waitqueue); 2135 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2136 } 2137 2138 /** 2139 * iavf_reset_task - Call-back task to handle hardware reset 2140 * @work: pointer to work_struct 2141 * 2142 * During reset we need to shut down and reinitialize the admin queue 2143 * before we can use it to communicate with the PF again. We also clear 2144 * and reinit the rings because that context is lost as well. 2145 **/ 2146 static void iavf_reset_task(struct work_struct *work) 2147 { 2148 struct iavf_adapter *adapter = container_of(work, 2149 struct iavf_adapter, 2150 reset_task); 2151 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2152 struct net_device *netdev = adapter->netdev; 2153 struct iavf_hw *hw = &adapter->hw; 2154 struct iavf_mac_filter *f, *ftmp; 2155 struct iavf_vlan_filter *vlf; 2156 struct iavf_cloud_filter *cf; 2157 u32 reg_val; 2158 int i = 0, err; 2159 bool running; 2160 2161 /* When device is being removed it doesn't make sense to run the reset 2162 * task, just return in such a case. 2163 */ 2164 if (mutex_is_locked(&adapter->remove_lock)) 2165 return; 2166 2167 if (iavf_lock_timeout(&adapter->crit_lock, 200)) { 2168 schedule_work(&adapter->reset_task); 2169 return; 2170 } 2171 while (!mutex_trylock(&adapter->client_lock)) 2172 usleep_range(500, 1000); 2173 if (CLIENT_ENABLED(adapter)) { 2174 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2175 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2176 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2177 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2178 cancel_delayed_work_sync(&adapter->client_task); 2179 iavf_notify_client_close(&adapter->vsi, true); 2180 } 2181 iavf_misc_irq_disable(adapter); 2182 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2183 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2184 /* Restart the AQ here. If we have been reset but didn't 2185 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2186 */ 2187 iavf_shutdown_adminq(hw); 2188 iavf_init_adminq(hw); 2189 iavf_request_reset(adapter); 2190 } 2191 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2192 2193 /* poll until we see the reset actually happen */ 2194 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2195 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2196 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2197 if (!reg_val) 2198 break; 2199 usleep_range(5000, 10000); 2200 } 2201 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2202 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2203 goto continue_reset; /* act like the reset happened */ 2204 } 2205 2206 /* wait until the reset is complete and the PF is responding to us */ 2207 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2208 /* sleep first to make sure a minimum wait time is met */ 2209 msleep(IAVF_RESET_WAIT_MS); 2210 2211 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2212 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2213 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2214 break; 2215 } 2216 2217 pci_set_master(adapter->pdev); 2218 2219 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2220 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2221 reg_val); 2222 iavf_disable_vf(adapter); 2223 mutex_unlock(&adapter->client_lock); 2224 return; /* Do not attempt to reinit. It's dead, Jim. */ 2225 } 2226 2227 continue_reset: 2228 /* We don't use netif_running() because it may be true prior to 2229 * ndo_open() returning, so we can't assume it means all our open 2230 * tasks have finished, since we're not holding the rtnl_lock here. 2231 */ 2232 running = ((adapter->state == __IAVF_RUNNING) || 2233 (adapter->state == __IAVF_RESETTING)); 2234 2235 if (running) { 2236 netif_carrier_off(netdev); 2237 netif_tx_stop_all_queues(netdev); 2238 adapter->link_up = false; 2239 iavf_napi_disable_all(adapter); 2240 } 2241 iavf_irq_disable(adapter); 2242 2243 iavf_change_state(adapter, __IAVF_RESETTING); 2244 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2245 2246 /* free the Tx/Rx rings and descriptors, might be better to just 2247 * re-use them sometime in the future 2248 */ 2249 iavf_free_all_rx_resources(adapter); 2250 iavf_free_all_tx_resources(adapter); 2251 2252 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2253 /* kill and reinit the admin queue */ 2254 iavf_shutdown_adminq(hw); 2255 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2256 err = iavf_init_adminq(hw); 2257 if (err) 2258 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2259 err); 2260 adapter->aq_required = 0; 2261 2262 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2263 err = iavf_reinit_interrupt_scheme(adapter); 2264 if (err) 2265 goto reset_err; 2266 } 2267 2268 if (RSS_AQ(adapter)) { 2269 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2270 } else { 2271 err = iavf_init_rss(adapter); 2272 if (err) 2273 goto reset_err; 2274 } 2275 2276 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2277 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2278 2279 spin_lock_bh(&adapter->mac_vlan_list_lock); 2280 2281 /* Delete filter for the current MAC address, it could have 2282 * been changed by the PF via administratively set MAC. 2283 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2284 */ 2285 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2286 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2287 list_del(&f->list); 2288 kfree(f); 2289 } 2290 } 2291 /* re-add all MAC filters */ 2292 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2293 f->add = true; 2294 } 2295 /* re-add all VLAN filters */ 2296 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2297 vlf->add = true; 2298 } 2299 2300 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2301 2302 /* check if TCs are running and re-add all cloud filters */ 2303 spin_lock_bh(&adapter->cloud_filter_list_lock); 2304 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2305 adapter->num_tc) { 2306 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2307 cf->add = true; 2308 } 2309 } 2310 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2311 2312 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2313 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2314 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2315 iavf_misc_irq_enable(adapter); 2316 2317 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2318 2319 /* We were running when the reset started, so we need to restore some 2320 * state here. 2321 */ 2322 if (running) { 2323 /* allocate transmit descriptors */ 2324 err = iavf_setup_all_tx_resources(adapter); 2325 if (err) 2326 goto reset_err; 2327 2328 /* allocate receive descriptors */ 2329 err = iavf_setup_all_rx_resources(adapter); 2330 if (err) 2331 goto reset_err; 2332 2333 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2334 err = iavf_request_traffic_irqs(adapter, netdev->name); 2335 if (err) 2336 goto reset_err; 2337 2338 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2339 } 2340 2341 iavf_configure(adapter); 2342 2343 /* iavf_up_complete() will switch device back 2344 * to __IAVF_RUNNING 2345 */ 2346 iavf_up_complete(adapter); 2347 2348 iavf_irq_enable(adapter, true); 2349 } else { 2350 iavf_change_state(adapter, __IAVF_DOWN); 2351 wake_up(&adapter->down_waitqueue); 2352 } 2353 mutex_unlock(&adapter->client_lock); 2354 mutex_unlock(&adapter->crit_lock); 2355 2356 return; 2357 reset_err: 2358 mutex_unlock(&adapter->client_lock); 2359 mutex_unlock(&adapter->crit_lock); 2360 if (running) 2361 iavf_change_state(adapter, __IAVF_RUNNING); 2362 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2363 iavf_close(netdev); 2364 } 2365 2366 /** 2367 * iavf_adminq_task - worker thread to clean the admin queue 2368 * @work: pointer to work_struct containing our data 2369 **/ 2370 static void iavf_adminq_task(struct work_struct *work) 2371 { 2372 struct iavf_adapter *adapter = 2373 container_of(work, struct iavf_adapter, adminq_task); 2374 struct iavf_hw *hw = &adapter->hw; 2375 struct iavf_arq_event_info event; 2376 enum virtchnl_ops v_op; 2377 enum iavf_status ret, v_ret; 2378 u32 val, oldval; 2379 u16 pending; 2380 2381 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2382 goto out; 2383 2384 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2385 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2386 if (!event.msg_buf) 2387 goto out; 2388 2389 if (iavf_lock_timeout(&adapter->crit_lock, 200)) 2390 goto freedom; 2391 do { 2392 ret = iavf_clean_arq_element(hw, &event, &pending); 2393 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2394 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2395 2396 if (ret || !v_op) 2397 break; /* No event to process or error cleaning ARQ */ 2398 2399 
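/* Pass the received message, its opcode (v_op) and the PF's return
 * status (v_ret) to the virtchnl completion handler.
 */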
iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2400 event.msg_len); 2401 if (pending != 0) 2402 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2403 } while (pending); 2404 mutex_unlock(&adapter->crit_lock); 2405 2406 if ((adapter->flags & 2407 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2408 adapter->state == __IAVF_RESETTING) 2409 goto freedom; 2410 2411 /* check for error indications */ 2412 val = rd32(hw, hw->aq.arq.len); 2413 if (val == 0xdeadbeef) /* indicates device in reset */ 2414 goto freedom; 2415 oldval = val; 2416 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2417 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2418 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2419 } 2420 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2421 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2422 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2423 } 2424 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2425 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2426 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2427 } 2428 if (oldval != val) 2429 wr32(hw, hw->aq.arq.len, val); 2430 2431 val = rd32(hw, hw->aq.asq.len); 2432 oldval = val; 2433 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2434 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2435 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2436 } 2437 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2438 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2439 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2440 } 2441 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2442 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2443 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2444 } 2445 if (oldval != val) 2446 wr32(hw, hw->aq.asq.len, val); 2447 2448 freedom: 2449 kfree(event.msg_buf); 2450 out: 2451 /* re-enable Admin queue interrupt cause */ 2452 iavf_misc_irq_enable(adapter); 2453 } 2454 2455 /** 2456 * iavf_client_task - worker thread to perform client work 2457 * @work: pointer to work_struct containing our data 2458 * 2459 * This task handles client interactions. Because client calls can be 2460 * reentrant, we can't handle them in the watchdog. 2461 **/ 2462 static void iavf_client_task(struct work_struct *work) 2463 { 2464 struct iavf_adapter *adapter = 2465 container_of(work, struct iavf_adapter, client_task.work); 2466 2467 /* If we can't get the client bit, just give up. We'll be rescheduled 2468 * later. 
2469 */ 2470 2471 if (!mutex_trylock(&adapter->client_lock)) 2472 return; 2473 2474 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2475 iavf_client_subtask(adapter); 2476 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2477 goto out; 2478 } 2479 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2480 iavf_notify_client_l2_params(&adapter->vsi); 2481 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2482 goto out; 2483 } 2484 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2485 iavf_notify_client_close(&adapter->vsi, false); 2486 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2487 goto out; 2488 } 2489 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2490 iavf_notify_client_open(&adapter->vsi); 2491 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2492 } 2493 out: 2494 mutex_unlock(&adapter->client_lock); 2495 } 2496 2497 /** 2498 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2499 * @adapter: board private structure 2500 * 2501 * Free all transmit software resources 2502 **/ 2503 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2504 { 2505 int i; 2506 2507 if (!adapter->tx_rings) 2508 return; 2509 2510 for (i = 0; i < adapter->num_active_queues; i++) 2511 if (adapter->tx_rings[i].desc) 2512 iavf_free_tx_resources(&adapter->tx_rings[i]); 2513 } 2514 2515 /** 2516 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2517 * @adapter: board private structure 2518 * 2519 * If this function returns with an error, then it's possible one or 2520 * more of the rings is populated (while the rest are not). It is the 2521 * callers duty to clean those orphaned rings. 2522 * 2523 * Return 0 on success, negative on failure 2524 **/ 2525 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2526 { 2527 int i, err = 0; 2528 2529 for (i = 0; i < adapter->num_active_queues; i++) { 2530 adapter->tx_rings[i].count = adapter->tx_desc_count; 2531 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2532 if (!err) 2533 continue; 2534 dev_err(&adapter->pdev->dev, 2535 "Allocation for Tx Queue %u failed\n", i); 2536 break; 2537 } 2538 2539 return err; 2540 } 2541 2542 /** 2543 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2544 * @adapter: board private structure 2545 * 2546 * If this function returns with an error, then it's possible one or 2547 * more of the rings is populated (while the rest are not). It is the 2548 * callers duty to clean those orphaned rings. 
2549 * 2550 * Return 0 on success, negative on failure 2551 **/ 2552 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2553 { 2554 int i, err = 0; 2555 2556 for (i = 0; i < adapter->num_active_queues; i++) { 2557 adapter->rx_rings[i].count = adapter->rx_desc_count; 2558 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2559 if (!err) 2560 continue; 2561 dev_err(&adapter->pdev->dev, 2562 "Allocation for Rx Queue %u failed\n", i); 2563 break; 2564 } 2565 return err; 2566 } 2567 2568 /** 2569 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2570 * @adapter: board private structure 2571 * 2572 * Free all receive software resources 2573 **/ 2574 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2575 { 2576 int i; 2577 2578 if (!adapter->rx_rings) 2579 return; 2580 2581 for (i = 0; i < adapter->num_active_queues; i++) 2582 if (adapter->rx_rings[i].desc) 2583 iavf_free_rx_resources(&adapter->rx_rings[i]); 2584 } 2585 2586 /** 2587 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2588 * @adapter: board private structure 2589 * @max_tx_rate: max Tx bw for a tc 2590 **/ 2591 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2592 u64 max_tx_rate) 2593 { 2594 int speed = 0, ret = 0; 2595 2596 if (ADV_LINK_SUPPORT(adapter)) { 2597 if (adapter->link_speed_mbps < U32_MAX) { 2598 speed = adapter->link_speed_mbps; 2599 goto validate_bw; 2600 } else { 2601 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2602 return -EINVAL; 2603 } 2604 } 2605 2606 switch (adapter->link_speed) { 2607 case VIRTCHNL_LINK_SPEED_40GB: 2608 speed = SPEED_40000; 2609 break; 2610 case VIRTCHNL_LINK_SPEED_25GB: 2611 speed = SPEED_25000; 2612 break; 2613 case VIRTCHNL_LINK_SPEED_20GB: 2614 speed = SPEED_20000; 2615 break; 2616 case VIRTCHNL_LINK_SPEED_10GB: 2617 speed = SPEED_10000; 2618 break; 2619 case VIRTCHNL_LINK_SPEED_5GB: 2620 speed = SPEED_5000; 2621 break; 2622 case VIRTCHNL_LINK_SPEED_2_5GB: 2623 speed = SPEED_2500; 2624 break; 2625 case VIRTCHNL_LINK_SPEED_1GB: 2626 speed = SPEED_1000; 2627 break; 2628 case VIRTCHNL_LINK_SPEED_100MB: 2629 speed = SPEED_100; 2630 break; 2631 default: 2632 break; 2633 } 2634 2635 validate_bw: 2636 if (max_tx_rate > speed) { 2637 dev_err(&adapter->pdev->dev, 2638 "Invalid tx rate specified\n"); 2639 ret = -EINVAL; 2640 } 2641 2642 return ret; 2643 } 2644 2645 /** 2646 * iavf_validate_ch_config - validate queue mapping info 2647 * @adapter: board private structure 2648 * @mqprio_qopt: queue parameters 2649 * 2650 * This function validates if the config provided by the user to 2651 * configure queue channels is valid or not. Returns 0 on a valid 2652 * config. 
2653 **/ 2654 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2655 struct tc_mqprio_qopt_offload *mqprio_qopt) 2656 { 2657 u64 total_max_rate = 0; 2658 int i, num_qps = 0; 2659 u64 tx_rate = 0; 2660 int ret = 0; 2661 2662 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2663 mqprio_qopt->qopt.num_tc < 1) 2664 return -EINVAL; 2665 2666 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2667 if (!mqprio_qopt->qopt.count[i] || 2668 mqprio_qopt->qopt.offset[i] != num_qps) 2669 return -EINVAL; 2670 if (mqprio_qopt->min_rate[i]) { 2671 dev_err(&adapter->pdev->dev, 2672 "Invalid min tx rate (greater than 0) specified\n"); 2673 return -EINVAL; 2674 } 2675 /*convert to Mbps */ 2676 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2677 IAVF_MBPS_DIVISOR); 2678 total_max_rate += tx_rate; 2679 num_qps += mqprio_qopt->qopt.count[i]; 2680 } 2681 if (num_qps > IAVF_MAX_REQ_QUEUES) 2682 return -EINVAL; 2683 2684 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2685 return ret; 2686 } 2687 2688 /** 2689 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2690 * @adapter: board private structure 2691 **/ 2692 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2693 { 2694 struct iavf_cloud_filter *cf, *cftmp; 2695 2696 spin_lock_bh(&adapter->cloud_filter_list_lock); 2697 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2698 list) { 2699 list_del(&cf->list); 2700 kfree(cf); 2701 adapter->num_cloud_filters--; 2702 } 2703 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2704 } 2705 2706 /** 2707 * __iavf_setup_tc - configure multiple traffic classes 2708 * @netdev: network interface device structure 2709 * @type_data: tc offload data 2710 * 2711 * This function processes the config information provided by the 2712 * user to configure traffic classes/queue channels and packages the 2713 * information to request the PF to setup traffic classes. 2714 * 2715 * Returns 0 on success. 
2716 **/ 2717 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2718 { 2719 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2720 struct iavf_adapter *adapter = netdev_priv(netdev); 2721 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2722 u8 num_tc = 0, total_qps = 0; 2723 int ret = 0, netdev_tc = 0; 2724 u64 max_tx_rate; 2725 u16 mode; 2726 int i; 2727 2728 num_tc = mqprio_qopt->qopt.num_tc; 2729 mode = mqprio_qopt->mode; 2730 2731 /* delete queue_channel */ 2732 if (!mqprio_qopt->qopt.hw) { 2733 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2734 /* reset the tc configuration */ 2735 netdev_reset_tc(netdev); 2736 adapter->num_tc = 0; 2737 netif_tx_stop_all_queues(netdev); 2738 netif_tx_disable(netdev); 2739 iavf_del_all_cloud_filters(adapter); 2740 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2741 goto exit; 2742 } else { 2743 return -EINVAL; 2744 } 2745 } 2746 2747 /* add queue channel */ 2748 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2749 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2750 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2751 return -EOPNOTSUPP; 2752 } 2753 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2754 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2755 return -EINVAL; 2756 } 2757 2758 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2759 if (ret) 2760 return ret; 2761 /* Return if same TC config is requested */ 2762 if (adapter->num_tc == num_tc) 2763 return 0; 2764 adapter->num_tc = num_tc; 2765 2766 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2767 if (i < num_tc) { 2768 adapter->ch_config.ch_info[i].count = 2769 mqprio_qopt->qopt.count[i]; 2770 adapter->ch_config.ch_info[i].offset = 2771 mqprio_qopt->qopt.offset[i]; 2772 total_qps += mqprio_qopt->qopt.count[i]; 2773 max_tx_rate = mqprio_qopt->max_rate[i]; 2774 /* convert to Mbps */ 2775 max_tx_rate = div_u64(max_tx_rate, 2776 IAVF_MBPS_DIVISOR); 2777 adapter->ch_config.ch_info[i].max_tx_rate = 2778 max_tx_rate; 2779 } else { 2780 adapter->ch_config.ch_info[i].count = 1; 2781 adapter->ch_config.ch_info[i].offset = 0; 2782 } 2783 } 2784 adapter->ch_config.total_qps = total_qps; 2785 netif_tx_stop_all_queues(netdev); 2786 netif_tx_disable(netdev); 2787 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2788 netdev_reset_tc(netdev); 2789 /* Report the tc mapping up the stack */ 2790 netdev_set_num_tc(adapter->netdev, num_tc); 2791 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2792 u16 qcount = mqprio_qopt->qopt.count[i]; 2793 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2794 2795 if (i < num_tc) 2796 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2797 qoffset); 2798 } 2799 } 2800 exit: 2801 return ret; 2802 } 2803 2804 /** 2805 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2806 * @adapter: board private structure 2807 * @f: pointer to struct flow_cls_offload 2808 * @filter: pointer to cloud filter structure 2809 */ 2810 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2811 struct flow_cls_offload *f, 2812 struct iavf_cloud_filter *filter) 2813 { 2814 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2815 struct flow_dissector *dissector = rule->match.dissector; 2816 u16 n_proto_mask = 0; 2817 u16 n_proto_key = 0; 2818 u8 field_flags = 0; 2819 u16 addr_type = 0; 2820 u16 n_proto = 0; 2821 int i = 0; 2822 struct virtchnl_filter *vf = &filter->f; 2823 2824 if (dissector->used_keys & 2825 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2826 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2827 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2828 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2829 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2830 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2831 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2832 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2833 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2834 dissector->used_keys); 2835 return -EOPNOTSUPP; 2836 } 2837 2838 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2839 struct flow_match_enc_keyid match; 2840 2841 flow_rule_match_enc_keyid(rule, &match); 2842 if (match.mask->keyid != 0) 2843 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2844 } 2845 2846 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2847 struct flow_match_basic match; 2848 2849 flow_rule_match_basic(rule, &match); 2850 n_proto_key = ntohs(match.key->n_proto); 2851 n_proto_mask = ntohs(match.mask->n_proto); 2852 2853 if (n_proto_key == ETH_P_ALL) { 2854 n_proto_key = 0; 2855 n_proto_mask = 0; 2856 } 2857 n_proto = n_proto_key & n_proto_mask; 2858 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2859 return -EINVAL; 2860 if (n_proto == ETH_P_IPV6) { 2861 /* specify flow type as TCP IPv6 */ 2862 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2863 } 2864 2865 if (match.key->ip_proto != IPPROTO_TCP) { 2866 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2867 return -EINVAL; 2868 } 2869 } 2870 2871 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2872 struct flow_match_eth_addrs match; 2873 2874 flow_rule_match_eth_addrs(rule, &match); 2875 2876 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2877 if (!is_zero_ether_addr(match.mask->dst)) { 2878 if (is_broadcast_ether_addr(match.mask->dst)) { 2879 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2880 } else { 2881 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2882 match.mask->dst); 2883 return IAVF_ERR_CONFIG; 2884 } 2885 } 2886 2887 if (!is_zero_ether_addr(match.mask->src)) { 2888 if (is_broadcast_ether_addr(match.mask->src)) { 2889 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2890 } else { 2891 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2892 match.mask->src); 2893 return IAVF_ERR_CONFIG; 2894 } 2895 } 2896 2897 if (!is_zero_ether_addr(match.key->dst)) 2898 if (is_valid_ether_addr(match.key->dst) || 2899 is_multicast_ether_addr(match.key->dst)) { 2900 /* set the mask if a valid dst_mac address */ 2901 for (i = 0; i < ETH_ALEN; i++) 2902 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2903 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2904 match.key->dst); 2905 } 2906 2907 if (!is_zero_ether_addr(match.key->src)) 2908 if (is_valid_ether_addr(match.key->src) || 2909 is_multicast_ether_addr(match.key->src)) { 2910 /* set the mask if a valid dst_mac address */ 2911 for (i = 0; i < ETH_ALEN; i++) 2912 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2913 ether_addr_copy(vf->data.tcp_spec.src_mac, 2914 match.key->src); 2915 } 2916 } 2917 2918 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2919 struct flow_match_vlan match; 2920 2921 flow_rule_match_vlan(rule, &match); 2922 if (match.mask->vlan_id) { 2923 if (match.mask->vlan_id == VLAN_VID_MASK) { 2924 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2925 } else { 2926 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2927 match.mask->vlan_id); 2928 return IAVF_ERR_CONFIG; 2929 } 2930 } 2931 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2932 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2933 } 2934 2935 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2936 struct flow_match_control match; 2937 2938 
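/* The control key only reports which address family the rule matches
 * (addr_type); the IPv4/IPv6 address keys themselves are handled below.
 */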
flow_rule_match_control(rule, &match);
2939 addr_type = match.key->addr_type;
2940 }
2941
2942 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2943 struct flow_match_ipv4_addrs match;
2944
2945 flow_rule_match_ipv4_addrs(rule, &match);
2946 if (match.mask->dst) {
2947 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2948 field_flags |= IAVF_CLOUD_FIELD_IIP;
2949 } else {
2950 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2951 be32_to_cpu(match.mask->dst));
2952 return IAVF_ERR_CONFIG;
2953 }
2954 }
2955
2956 if (match.mask->src) {
2957 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2958 field_flags |= IAVF_CLOUD_FIELD_IIP;
2959 } else {
2960 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2961 be32_to_cpu(match.mask->src));
2962 return IAVF_ERR_CONFIG;
2963 }
2964 }
2965
2966 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2967 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2968 return IAVF_ERR_CONFIG;
2969 }
2970 if (match.key->dst) {
2971 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2972 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2973 }
2974 if (match.key->src) {
2975 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2976 vf->data.tcp_spec.src_ip[0] = match.key->src;
2977 }
2978 }
2979
2980 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2981 struct flow_match_ipv6_addrs match;
2982
2983 flow_rule_match_ipv6_addrs(rule, &match);
2984
2985 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2986 if (ipv6_addr_any(&match.mask->dst)) {
2987 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2988 IPV6_ADDR_ANY);
2989 return IAVF_ERR_CONFIG;
2990 }
2991
2992 /* src and dest IPv6 address should not be LOOPBACK
2993 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2994 */
2995 if (ipv6_addr_loopback(&match.key->dst) ||
2996 ipv6_addr_loopback(&match.key->src)) {
2997 dev_err(&adapter->pdev->dev,
2998 "ipv6 addr should not be loopback\n");
2999 return IAVF_ERR_CONFIG;
3000 }
3001 if (!ipv6_addr_any(&match.mask->dst) ||
3002 !ipv6_addr_any(&match.mask->src))
3003 field_flags |= IAVF_CLOUD_FIELD_IIP;
3004
3005 for (i = 0; i < 4; i++)
3006 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3007 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3008 sizeof(vf->data.tcp_spec.dst_ip));
3009 for (i = 0; i < 4; i++)
3010 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3011 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3012 sizeof(vf->data.tcp_spec.src_ip));
3013 }
3014 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3015 struct flow_match_ports match;
3016
3017 flow_rule_match_ports(rule, &match);
3018 if (match.mask->src) {
3019 if (match.mask->src == cpu_to_be16(0xffff)) {
3020 field_flags |= IAVF_CLOUD_FIELD_IIP;
3021 } else {
3022 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3023 be16_to_cpu(match.mask->src));
3024 return IAVF_ERR_CONFIG;
3025 }
3026 }
3027
3028 if (match.mask->dst) {
3029 if (match.mask->dst == cpu_to_be16(0xffff)) {
3030 field_flags |= IAVF_CLOUD_FIELD_IIP;
3031 } else {
3032 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3033 be16_to_cpu(match.mask->dst));
3034 return IAVF_ERR_CONFIG;
3035 }
3036 }
3037 if (match.key->dst) {
3038 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3039 vf->data.tcp_spec.dst_port = match.key->dst;
3040 }
3041
3042 if (match.key->src) {
3043 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3044 vf->data.tcp_spec.src_port = match.key->src;
3045 }
3046 }
3047 vf->field_flags = field_flags;
3048
3049
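/* All recognized keys have been copied into the virtchnl filter; the caller
 * marks the filter for addition and sets IAVF_FLAG_AQ_ADD_CLOUD_FILTER so it
 * is pushed to the PF.
 */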
return 0; 3050 } 3051 3052 /** 3053 * iavf_handle_tclass - Forward to a traffic class on the device 3054 * @adapter: board private structure 3055 * @tc: traffic class index on the device 3056 * @filter: pointer to cloud filter structure 3057 */ 3058 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3059 struct iavf_cloud_filter *filter) 3060 { 3061 if (tc == 0) 3062 return 0; 3063 if (tc < adapter->num_tc) { 3064 if (!filter->f.data.tcp_spec.dst_port) { 3065 dev_err(&adapter->pdev->dev, 3066 "Specify destination port to redirect to traffic class other than TC0\n"); 3067 return -EINVAL; 3068 } 3069 } 3070 /* redirect to a traffic class on the same device */ 3071 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3072 filter->f.action_meta = tc; 3073 return 0; 3074 } 3075 3076 /** 3077 * iavf_configure_clsflower - Add tc flower filters 3078 * @adapter: board private structure 3079 * @cls_flower: Pointer to struct flow_cls_offload 3080 */ 3081 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3082 struct flow_cls_offload *cls_flower) 3083 { 3084 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3085 struct iavf_cloud_filter *filter = NULL; 3086 int err = -EINVAL, count = 50; 3087 3088 if (tc < 0) { 3089 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3090 return -EINVAL; 3091 } 3092 3093 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3094 if (!filter) 3095 return -ENOMEM; 3096 3097 while (!mutex_trylock(&adapter->crit_lock)) { 3098 if (--count == 0) 3099 goto err; 3100 udelay(1); 3101 } 3102 3103 filter->cookie = cls_flower->cookie; 3104 3105 /* set the mask to all zeroes to begin with */ 3106 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3107 /* start out with flow type and eth type IPv4 to begin with */ 3108 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3109 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3110 if (err < 0) 3111 goto err; 3112 3113 err = iavf_handle_tclass(adapter, tc, filter); 3114 if (err < 0) 3115 goto err; 3116 3117 /* add filter to the list */ 3118 spin_lock_bh(&adapter->cloud_filter_list_lock); 3119 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3120 adapter->num_cloud_filters++; 3121 filter->add = true; 3122 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3123 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3124 err: 3125 if (err) 3126 kfree(filter); 3127 3128 mutex_unlock(&adapter->crit_lock); 3129 return err; 3130 } 3131 3132 /* iavf_find_cf - Find the cloud filter in the list 3133 * @adapter: Board private structure 3134 * @cookie: filter specific cookie 3135 * 3136 * Returns ptr to the filter object or NULL. Must be called while holding the 3137 * cloud_filter_list_lock. 
3138 */ 3139 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3140 unsigned long *cookie) 3141 { 3142 struct iavf_cloud_filter *filter = NULL; 3143 3144 if (!cookie) 3145 return NULL; 3146 3147 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3148 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3149 return filter; 3150 } 3151 return NULL; 3152 } 3153 3154 /** 3155 * iavf_delete_clsflower - Remove tc flower filters 3156 * @adapter: board private structure 3157 * @cls_flower: Pointer to struct flow_cls_offload 3158 */ 3159 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3160 struct flow_cls_offload *cls_flower) 3161 { 3162 struct iavf_cloud_filter *filter = NULL; 3163 int err = 0; 3164 3165 spin_lock_bh(&adapter->cloud_filter_list_lock); 3166 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3167 if (filter) { 3168 filter->del = true; 3169 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3170 } else { 3171 err = -EINVAL; 3172 } 3173 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3174 3175 return err; 3176 } 3177 3178 /** 3179 * iavf_setup_tc_cls_flower - flower classifier offloads 3180 * @adapter: board private structure 3181 * @cls_flower: pointer to flow_cls_offload struct with flow info 3182 */ 3183 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3184 struct flow_cls_offload *cls_flower) 3185 { 3186 switch (cls_flower->command) { 3187 case FLOW_CLS_REPLACE: 3188 return iavf_configure_clsflower(adapter, cls_flower); 3189 case FLOW_CLS_DESTROY: 3190 return iavf_delete_clsflower(adapter, cls_flower); 3191 case FLOW_CLS_STATS: 3192 return -EOPNOTSUPP; 3193 default: 3194 return -EOPNOTSUPP; 3195 } 3196 } 3197 3198 /** 3199 * iavf_setup_tc_block_cb - block callback for tc 3200 * @type: type of offload 3201 * @type_data: offload data 3202 * @cb_priv: 3203 * 3204 * This function is the block callback for traffic classes 3205 **/ 3206 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3207 void *cb_priv) 3208 { 3209 struct iavf_adapter *adapter = cb_priv; 3210 3211 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3212 return -EOPNOTSUPP; 3213 3214 switch (type) { 3215 case TC_SETUP_CLSFLOWER: 3216 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3217 default: 3218 return -EOPNOTSUPP; 3219 } 3220 } 3221 3222 static LIST_HEAD(iavf_block_cb_list); 3223 3224 /** 3225 * iavf_setup_tc - configure multiple traffic classes 3226 * @netdev: network interface device structure 3227 * @type: type of offload 3228 * @type_data: tc offload data 3229 * 3230 * This function is the callback to ndo_setup_tc in the 3231 * netdev_ops. 3232 * 3233 * Returns 0 on success 3234 **/ 3235 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3236 void *type_data) 3237 { 3238 struct iavf_adapter *adapter = netdev_priv(netdev); 3239 3240 switch (type) { 3241 case TC_SETUP_QDISC_MQPRIO: 3242 return __iavf_setup_tc(netdev, type_data); 3243 case TC_SETUP_BLOCK: 3244 return flow_block_cb_setup_simple(type_data, 3245 &iavf_block_cb_list, 3246 iavf_setup_tc_block_cb, 3247 adapter, adapter, true); 3248 default: 3249 return -EOPNOTSUPP; 3250 } 3251 } 3252 3253 /** 3254 * iavf_open - Called when a network interface is made active 3255 * @netdev: network interface device structure 3256 * 3257 * Returns 0 on success, negative value on failure 3258 * 3259 * The open entry point is called when a network interface is made 3260 * active by the system (IFF_UP). 
At this point all resources needed 3261 * for transmit and receive operations are allocated, the interrupt 3262 * handler is registered with the OS, the watchdog is started, 3263 * and the stack is notified that the interface is ready. 3264 **/ 3265 static int iavf_open(struct net_device *netdev) 3266 { 3267 struct iavf_adapter *adapter = netdev_priv(netdev); 3268 int err; 3269 3270 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3271 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3272 return -EIO; 3273 } 3274 3275 while (!mutex_trylock(&adapter->crit_lock)) 3276 usleep_range(500, 1000); 3277 3278 if (adapter->state != __IAVF_DOWN) { 3279 err = -EBUSY; 3280 goto err_unlock; 3281 } 3282 3283 if (adapter->state == __IAVF_RUNNING && 3284 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 3285 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 3286 err = 0; 3287 goto err_unlock; 3288 } 3289 3290 /* allocate transmit descriptors */ 3291 err = iavf_setup_all_tx_resources(adapter); 3292 if (err) 3293 goto err_setup_tx; 3294 3295 /* allocate receive descriptors */ 3296 err = iavf_setup_all_rx_resources(adapter); 3297 if (err) 3298 goto err_setup_rx; 3299 3300 /* clear any pending interrupts, may auto mask */ 3301 err = iavf_request_traffic_irqs(adapter, netdev->name); 3302 if (err) 3303 goto err_req_irq; 3304 3305 spin_lock_bh(&adapter->mac_vlan_list_lock); 3306 3307 iavf_add_filter(adapter, adapter->hw.mac.addr); 3308 3309 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3310 3311 iavf_configure(adapter); 3312 3313 iavf_up_complete(adapter); 3314 3315 iavf_irq_enable(adapter, true); 3316 3317 mutex_unlock(&adapter->crit_lock); 3318 3319 return 0; 3320 3321 err_req_irq: 3322 iavf_down(adapter); 3323 iavf_free_traffic_irqs(adapter); 3324 err_setup_rx: 3325 iavf_free_all_rx_resources(adapter); 3326 err_setup_tx: 3327 iavf_free_all_tx_resources(adapter); 3328 err_unlock: 3329 mutex_unlock(&adapter->crit_lock); 3330 3331 return err; 3332 } 3333 3334 /** 3335 * iavf_close - Disables a network interface 3336 * @netdev: network interface device structure 3337 * 3338 * Returns 0, this is not allowed to fail 3339 * 3340 * The close entry point is called when an interface is de-activated 3341 * by the OS. The hardware is still under the drivers control, but 3342 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3343 * are freed, along with all transmit and receive resources. 3344 **/ 3345 static int iavf_close(struct net_device *netdev) 3346 { 3347 struct iavf_adapter *adapter = netdev_priv(netdev); 3348 int status; 3349 3350 if (adapter->state <= __IAVF_DOWN_PENDING) 3351 return 0; 3352 3353 while (!mutex_trylock(&adapter->crit_lock)) 3354 usleep_range(500, 1000); 3355 3356 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3357 if (CLIENT_ENABLED(adapter)) 3358 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3359 3360 iavf_down(adapter); 3361 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 3362 iavf_free_traffic_irqs(adapter); 3363 3364 mutex_unlock(&adapter->crit_lock); 3365 3366 /* We explicitly don't free resources here because the hardware is 3367 * still active and can DMA into memory. Resources are cleared in 3368 * iavf_virtchnl_completion() after we get confirmation from the PF 3369 * driver that the rings have been stopped. 3370 * 3371 * Also, we wait for state to transition to __IAVF_DOWN before 3372 * returning. 
State change occurs in iavf_virtchnl_completion() after 3373 * VF resources are released (which occurs after PF driver processes and 3374 * responds to admin queue commands). 3375 */ 3376 3377 status = wait_event_timeout(adapter->down_waitqueue, 3378 adapter->state == __IAVF_DOWN, 3379 msecs_to_jiffies(500)); 3380 if (!status) 3381 netdev_warn(netdev, "Device resources not yet released\n"); 3382 return 0; 3383 } 3384 3385 /** 3386 * iavf_change_mtu - Change the Maximum Transfer Unit 3387 * @netdev: network interface device structure 3388 * @new_mtu: new value for maximum frame size 3389 * 3390 * Returns 0 on success, negative on failure 3391 **/ 3392 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3393 { 3394 struct iavf_adapter *adapter = netdev_priv(netdev); 3395 3396 netdev->mtu = new_mtu; 3397 if (CLIENT_ENABLED(adapter)) { 3398 iavf_notify_client_l2_params(&adapter->vsi); 3399 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3400 } 3401 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3402 queue_work(iavf_wq, &adapter->reset_task); 3403 3404 return 0; 3405 } 3406 3407 /** 3408 * iavf_set_features - set the netdev feature flags 3409 * @netdev: ptr to the netdev being adjusted 3410 * @features: the feature set that the stack is suggesting 3411 * Note: expects to be called while under rtnl_lock() 3412 **/ 3413 static int iavf_set_features(struct net_device *netdev, 3414 netdev_features_t features) 3415 { 3416 struct iavf_adapter *adapter = netdev_priv(netdev); 3417 3418 /* Don't allow changing VLAN_RX flag when adapter is not capable 3419 * of VLAN offload 3420 */ 3421 if (!VLAN_ALLOWED(adapter)) { 3422 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3423 return -EINVAL; 3424 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3425 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3426 adapter->aq_required |= 3427 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3428 else 3429 adapter->aq_required |= 3430 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3431 } 3432 3433 return 0; 3434 } 3435 3436 /** 3437 * iavf_features_check - Validate encapsulated packet conforms to limits 3438 * @skb: skb buff 3439 * @dev: This physical port's netdev 3440 * @features: Offload features that the stack believes apply 3441 **/ 3442 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3443 struct net_device *dev, 3444 netdev_features_t features) 3445 { 3446 size_t len; 3447 3448 /* No point in doing any of this if neither checksum nor GSO are 3449 * being requested for this frame. We can rule out both by just 3450 * checking for CHECKSUM_PARTIAL 3451 */ 3452 if (skb->ip_summed != CHECKSUM_PARTIAL) 3453 return features; 3454 3455 /* We cannot support GSO if the MSS is going to be less than 3456 * 64 bytes. If it is then we need to drop support for GSO. 
3457 */ 3458 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3459 features &= ~NETIF_F_GSO_MASK; 3460 3461 /* MACLEN can support at most 63 words */ 3462 len = skb_network_header(skb) - skb->data; 3463 if (len & ~(63 * 2)) 3464 goto out_err; 3465 3466 /* IPLEN and EIPLEN can support at most 127 dwords */ 3467 len = skb_transport_header(skb) - skb_network_header(skb); 3468 if (len & ~(127 * 4)) 3469 goto out_err; 3470 3471 if (skb->encapsulation) { 3472 /* L4TUNLEN can support 127 words */ 3473 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3474 if (len & ~(127 * 2)) 3475 goto out_err; 3476 3477 /* IPLEN can support at most 127 dwords */ 3478 len = skb_inner_transport_header(skb) - 3479 skb_inner_network_header(skb); 3480 if (len & ~(127 * 4)) 3481 goto out_err; 3482 } 3483 3484 /* No need to validate L4LEN as TCP is the only protocol with a 3485 * a flexible value and we support all possible values supported 3486 * by TCP, which is at most 15 dwords 3487 */ 3488 3489 return features; 3490 out_err: 3491 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3492 } 3493 3494 /** 3495 * iavf_fix_features - fix up the netdev feature bits 3496 * @netdev: our net device 3497 * @features: desired feature bits 3498 * 3499 * Returns fixed-up features bits 3500 **/ 3501 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3502 netdev_features_t features) 3503 { 3504 struct iavf_adapter *adapter = netdev_priv(netdev); 3505 3506 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3507 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3508 NETIF_F_HW_VLAN_CTAG_RX | 3509 NETIF_F_HW_VLAN_CTAG_FILTER); 3510 3511 return features; 3512 } 3513 3514 static const struct net_device_ops iavf_netdev_ops = { 3515 .ndo_open = iavf_open, 3516 .ndo_stop = iavf_close, 3517 .ndo_start_xmit = iavf_xmit_frame, 3518 .ndo_set_rx_mode = iavf_set_rx_mode, 3519 .ndo_validate_addr = eth_validate_addr, 3520 .ndo_set_mac_address = iavf_set_mac, 3521 .ndo_change_mtu = iavf_change_mtu, 3522 .ndo_tx_timeout = iavf_tx_timeout, 3523 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3524 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3525 .ndo_features_check = iavf_features_check, 3526 .ndo_fix_features = iavf_fix_features, 3527 .ndo_set_features = iavf_set_features, 3528 .ndo_setup_tc = iavf_setup_tc, 3529 }; 3530 3531 /** 3532 * iavf_check_reset_complete - check that VF reset is complete 3533 * @hw: pointer to hw struct 3534 * 3535 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3536 **/ 3537 static int iavf_check_reset_complete(struct iavf_hw *hw) 3538 { 3539 u32 rstat; 3540 int i; 3541 3542 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3543 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3544 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3545 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3546 (rstat == VIRTCHNL_VFR_COMPLETED)) 3547 return 0; 3548 usleep_range(10, 20); 3549 } 3550 return -EBUSY; 3551 } 3552 3553 /** 3554 * iavf_process_config - Process the config information we got from the PF 3555 * @adapter: board private structure 3556 * 3557 * Verify that we have a valid config struct, and set up our netdev features 3558 * and our VSI struct. 
3559 **/ 3560 int iavf_process_config(struct iavf_adapter *adapter) 3561 { 3562 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3563 int i, num_req_queues = adapter->num_req_queues; 3564 struct net_device *netdev = adapter->netdev; 3565 struct iavf_vsi *vsi = &adapter->vsi; 3566 netdev_features_t hw_enc_features; 3567 netdev_features_t hw_features; 3568 3569 /* got VF config message back from PF, now we can parse it */ 3570 for (i = 0; i < vfres->num_vsis; i++) { 3571 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3572 adapter->vsi_res = &vfres->vsi_res[i]; 3573 } 3574 if (!adapter->vsi_res) { 3575 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3576 return -ENODEV; 3577 } 3578 3579 if (num_req_queues && 3580 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3581 /* Problem. The PF gave us fewer queues than what we had 3582 * negotiated in our request. Need a reset to see if we can't 3583 * get back to a working state. 3584 */ 3585 dev_err(&adapter->pdev->dev, 3586 "Requested %d queues, but PF only gave us %d.\n", 3587 num_req_queues, 3588 adapter->vsi_res->num_queue_pairs); 3589 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3590 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3591 iavf_schedule_reset(adapter); 3592 return -ENODEV; 3593 } 3594 adapter->num_req_queues = 0; 3595 3596 hw_enc_features = NETIF_F_SG | 3597 NETIF_F_IP_CSUM | 3598 NETIF_F_IPV6_CSUM | 3599 NETIF_F_HIGHDMA | 3600 NETIF_F_SOFT_FEATURES | 3601 NETIF_F_TSO | 3602 NETIF_F_TSO_ECN | 3603 NETIF_F_TSO6 | 3604 NETIF_F_SCTP_CRC | 3605 NETIF_F_RXHASH | 3606 NETIF_F_RXCSUM | 3607 0; 3608 3609 /* advertise to stack only if offloads for encapsulated packets is 3610 * supported 3611 */ 3612 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3613 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3614 NETIF_F_GSO_GRE | 3615 NETIF_F_GSO_GRE_CSUM | 3616 NETIF_F_GSO_IPXIP4 | 3617 NETIF_F_GSO_IPXIP6 | 3618 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3619 NETIF_F_GSO_PARTIAL | 3620 0; 3621 3622 if (!(vfres->vf_cap_flags & 3623 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3624 netdev->gso_partial_features |= 3625 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3626 3627 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3628 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3629 netdev->hw_enc_features |= hw_enc_features; 3630 } 3631 /* record features VLANs can make use of */ 3632 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3633 3634 /* Write features and hw_features separately to avoid polluting 3635 * with, or dropping, features that are set when we registered. 3636 */ 3637 hw_features = hw_enc_features; 3638 3639 /* Enable VLAN features if supported */ 3640 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3641 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3642 NETIF_F_HW_VLAN_CTAG_RX); 3643 /* Enable cloud filter if ADQ is supported */ 3644 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3645 hw_features |= NETIF_F_HW_TC; 3646 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3647 hw_features |= NETIF_F_GSO_UDP_L4; 3648 3649 netdev->hw_features |= hw_features; 3650 3651 netdev->features |= hw_features; 3652 3653 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3654 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3655 3656 netdev->priv_flags |= IFF_UNICAST_FLT; 3657 3658 /* Do not turn on offloads when they are requested to be turned off. 3659 * TSO needs minimum 576 bytes to work correctly. 
3660 */ 3661 if (netdev->wanted_features) { 3662 if (!(netdev->wanted_features & NETIF_F_TSO) || 3663 netdev->mtu < 576) 3664 netdev->features &= ~NETIF_F_TSO; 3665 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3666 netdev->mtu < 576) 3667 netdev->features &= ~NETIF_F_TSO6; 3668 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3669 netdev->features &= ~NETIF_F_TSO_ECN; 3670 if (!(netdev->wanted_features & NETIF_F_GRO)) 3671 netdev->features &= ~NETIF_F_GRO; 3672 if (!(netdev->wanted_features & NETIF_F_GSO)) 3673 netdev->features &= ~NETIF_F_GSO; 3674 } 3675 3676 adapter->vsi.id = adapter->vsi_res->vsi_id; 3677 3678 adapter->vsi.back = adapter; 3679 adapter->vsi.base_vector = 1; 3680 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3681 vsi->netdev = adapter->netdev; 3682 vsi->qs_handle = adapter->vsi_res->qset_handle; 3683 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3684 adapter->rss_key_size = vfres->rss_key_size; 3685 adapter->rss_lut_size = vfres->rss_lut_size; 3686 } else { 3687 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3688 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3689 } 3690 3691 return 0; 3692 } 3693 3694 /** 3695 * iavf_shutdown - Shutdown the device in preparation for a reboot 3696 * @pdev: pci device structure 3697 **/ 3698 static void iavf_shutdown(struct pci_dev *pdev) 3699 { 3700 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 3701 struct net_device *netdev = adapter->netdev; 3702 3703 netif_device_detach(netdev); 3704 3705 if (netif_running(netdev)) 3706 iavf_close(netdev); 3707 3708 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 3709 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 3710 /* Prevent the watchdog from running. */ 3711 iavf_change_state(adapter, __IAVF_REMOVE); 3712 adapter->aq_required = 0; 3713 mutex_unlock(&adapter->crit_lock); 3714 3715 #ifdef CONFIG_PM 3716 pci_save_state(pdev); 3717 3718 #endif 3719 pci_disable_device(pdev); 3720 } 3721 3722 /** 3723 * iavf_probe - Device Initialization Routine 3724 * @pdev: PCI device information struct 3725 * @ent: entry in iavf_pci_tbl 3726 * 3727 * Returns 0 on success, negative on failure 3728 * 3729 * iavf_probe initializes an adapter identified by a pci_dev structure. 3730 * The OS initialization, configuring of the adapter private structure, 3731 * and a hardware reset occur. 
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	iavf_change_state(adapter, __IAVF_STARTUP);

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&adapter->crit_lock);
	mutex_init(&adapter->client_lock);
	mutex_init(&adapter->remove_lock);
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
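	/* Delay the first watchdog run by 5 ms per PCI function number
	 * (0-7), which staggers the initial poll across VFs instead of
	 * firing them all at the same instant.
	 */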
	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	mutex_unlock(&adapter->crit_lock);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter;
	int err;

	adapter = iavf_pdev_to_adapter(pdev);

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

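	/* Scheduling the reset task rebuilds the interrupt and queue state
	 * that was torn down in iavf_suspend().
	 */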
	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(adapter->netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	enum iavf_state_t prev_state = adapter->last_state;
	struct net_device *netdev = adapter->netdev;
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* Indicate we are in remove and not to run reset_task */
	mutex_lock(&adapter->remove_lock);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);

	/* Shut down all the garbage mashers on the detention level */
	iavf_change_state(adapter, __IAVF_REMOVE);
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);

	/* In case we enter iavf_remove from erroneous state, free traffic irqs
	 * here, so as to not cause a kernel crash, when calling
	 * iavf_reset_interrupt_capability.
	 */
	if ((adapter->last_state == __IAVF_RESETTING &&
	     prev_state != __IAVF_DOWN) ||
	    (adapter->last_state == __IAVF_RUNNING &&
	     !(netdev->flags & IFF_UP)))
		iavf_free_traffic_irqs(adapter);

	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	mutex_destroy(&adapter->crit_lock);
	mutex_unlock(&adapter->remove_lock);
	mutex_destroy(&adapter->remove_lock);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

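/* SIMPLE_DEV_PM_OPS() only fills in the system sleep hooks; when
 * CONFIG_PM_SLEEP is disabled it expands to an empty table, which is why
 * iavf_suspend() and iavf_resume() are marked __maybe_unused above.
 */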
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. It creates the driver workqueue and registers the driver
 * with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */