// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
} 116 117 /** 118 * iavf_free_virt_mem_d - OS specific memory free for shared code 119 * @hw: pointer to the HW structure 120 * @mem: ptr to mem struct to free 121 **/ 122 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 123 struct iavf_virt_mem *mem) 124 { 125 if (!mem) 126 return IAVF_ERR_PARAM; 127 128 /* it's ok to kfree a NULL pointer */ 129 kfree(mem->va); 130 131 return 0; 132 } 133 134 /** 135 * iavf_lock_timeout - try to lock mutex but give up after timeout 136 * @lock: mutex that should be locked 137 * @msecs: timeout in msecs 138 * 139 * Returns 0 on success, negative on failure 140 **/ 141 static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) 142 { 143 unsigned int wait, delay = 10; 144 145 for (wait = 0; wait < msecs; wait += delay) { 146 if (mutex_trylock(lock)) 147 return 0; 148 149 msleep(delay); 150 } 151 152 return -1; 153 } 154 155 /** 156 * iavf_schedule_reset - Set the flags and schedule a reset event 157 * @adapter: board private structure 158 **/ 159 void iavf_schedule_reset(struct iavf_adapter *adapter) 160 { 161 if (!(adapter->flags & 162 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 163 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 164 queue_work(iavf_wq, &adapter->reset_task); 165 } 166 } 167 168 /** 169 * iavf_tx_timeout - Respond to a Tx Hang 170 * @netdev: network interface device structure 171 * @txqueue: queue number that is timing out 172 **/ 173 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue) 174 { 175 struct iavf_adapter *adapter = netdev_priv(netdev); 176 177 adapter->tx_timeout_count++; 178 iavf_schedule_reset(adapter); 179 } 180 181 /** 182 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 183 * @adapter: board private structure 184 **/ 185 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 186 { 187 struct iavf_hw *hw = &adapter->hw; 188 189 if (!adapter->msix_entries) 190 return; 191 192 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 193 194 iavf_flush(hw); 195 196 synchronize_irq(adapter->msix_entries[0].vector); 197 } 198 199 /** 200 * iavf_misc_irq_enable - Enable default interrupt generation settings 201 * @adapter: board private structure 202 **/ 203 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 204 { 205 struct iavf_hw *hw = &adapter->hw; 206 207 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 208 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 209 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 210 211 iavf_flush(hw); 212 } 213 214 /** 215 * iavf_irq_disable - Mask off interrupt generation on the NIC 216 * @adapter: board private structure 217 **/ 218 static void iavf_irq_disable(struct iavf_adapter *adapter) 219 { 220 int i; 221 struct iavf_hw *hw = &adapter->hw; 222 223 if (!adapter->msix_entries) 224 return; 225 226 for (i = 1; i < adapter->num_msix_vectors; i++) { 227 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 228 synchronize_irq(adapter->msix_entries[i].vector); 229 } 230 iavf_flush(hw); 231 } 232 233 /** 234 * iavf_irq_enable_queues - Enable interrupt for specified queues 235 * @adapter: board private structure 236 * @mask: bitmap of queues to enable 237 **/ 238 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 239 { 240 struct iavf_hw *hw = &adapter->hw; 241 int i; 242 243 for (i = 1; i < adapter->num_msix_vectors; i++) { 244 if (mask & BIT(i - 1)) { 245 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 246 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 247 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 248 } 249 } 250 } 251 252 /** 253 
* iavf_irq_enable - Enable default interrupt generation settings 254 * @adapter: board private structure 255 * @flush: boolean value whether to run rd32() 256 **/ 257 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 258 { 259 struct iavf_hw *hw = &adapter->hw; 260 261 iavf_misc_irq_enable(adapter); 262 iavf_irq_enable_queues(adapter, ~0); 263 264 if (flush) 265 iavf_flush(hw); 266 } 267 268 /** 269 * iavf_msix_aq - Interrupt handler for vector 0 270 * @irq: interrupt number 271 * @data: pointer to netdev 272 **/ 273 static irqreturn_t iavf_msix_aq(int irq, void *data) 274 { 275 struct net_device *netdev = data; 276 struct iavf_adapter *adapter = netdev_priv(netdev); 277 struct iavf_hw *hw = &adapter->hw; 278 279 /* handle non-queue interrupts, these reads clear the registers */ 280 rd32(hw, IAVF_VFINT_ICR01); 281 rd32(hw, IAVF_VFINT_ICR0_ENA1); 282 283 /* schedule work on the private workqueue */ 284 queue_work(iavf_wq, &adapter->adminq_task); 285 286 return IRQ_HANDLED; 287 } 288 289 /** 290 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 291 * @irq: interrupt number 292 * @data: pointer to a q_vector 293 **/ 294 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 295 { 296 struct iavf_q_vector *q_vector = data; 297 298 if (!q_vector->tx.ring && !q_vector->rx.ring) 299 return IRQ_HANDLED; 300 301 napi_schedule_irqoff(&q_vector->napi); 302 303 return IRQ_HANDLED; 304 } 305 306 /** 307 * iavf_map_vector_to_rxq - associate irqs with rx queues 308 * @adapter: board private structure 309 * @v_idx: interrupt number 310 * @r_idx: queue number 311 **/ 312 static void 313 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 314 { 315 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 316 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 317 struct iavf_hw *hw = &adapter->hw; 318 319 rx_ring->q_vector = q_vector; 320 rx_ring->next = q_vector->rx.ring; 321 rx_ring->vsi = &adapter->vsi; 322 q_vector->rx.ring = rx_ring; 323 q_vector->rx.count++; 324 q_vector->rx.next_update = jiffies + 1; 325 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 326 q_vector->ring_mask |= BIT(r_idx); 327 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 328 q_vector->rx.current_itr >> 1); 329 q_vector->rx.current_itr = q_vector->rx.target_itr; 330 } 331 332 /** 333 * iavf_map_vector_to_txq - associate irqs with tx queues 334 * @adapter: board private structure 335 * @v_idx: interrupt number 336 * @t_idx: queue number 337 **/ 338 static void 339 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 340 { 341 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 342 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 343 struct iavf_hw *hw = &adapter->hw; 344 345 tx_ring->q_vector = q_vector; 346 tx_ring->next = q_vector->tx.ring; 347 tx_ring->vsi = &adapter->vsi; 348 q_vector->tx.ring = tx_ring; 349 q_vector->tx.count++; 350 q_vector->tx.next_update = jiffies + 1; 351 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 352 q_vector->num_ringpairs++; 353 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 354 q_vector->tx.target_itr >> 1); 355 q_vector->tx.current_itr = q_vector->tx.target_itr; 356 } 357 358 /** 359 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 360 * @adapter: board private structure to initialize 361 * 362 * This function maps descriptor rings to the queue-specific vectors 363 * we were allotted through the MSI-X enabling code. 
Ideally, we'd have 364 * one vector per ring/queue, but on a constrained vector budget, we 365 * group the rings as "efficiently" as possible. You would add new 366 * mapping configurations in here. 367 **/ 368 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 369 { 370 int rings_remaining = adapter->num_active_queues; 371 int ridx = 0, vidx = 0; 372 int q_vectors; 373 374 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 375 376 for (; ridx < rings_remaining; ridx++) { 377 iavf_map_vector_to_rxq(adapter, vidx, ridx); 378 iavf_map_vector_to_txq(adapter, vidx, ridx); 379 380 /* In the case where we have more queues than vectors, continue 381 * round-robin on vectors until all queues are mapped. 382 */ 383 if (++vidx >= q_vectors) 384 vidx = 0; 385 } 386 387 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 388 } 389 390 /** 391 * iavf_irq_affinity_notify - Callback for affinity changes 392 * @notify: context as to what irq was changed 393 * @mask: the new affinity mask 394 * 395 * This is a callback function used by the irq_set_affinity_notifier function 396 * so that we may register to receive changes to the irq affinity masks. 397 **/ 398 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 399 const cpumask_t *mask) 400 { 401 struct iavf_q_vector *q_vector = 402 container_of(notify, struct iavf_q_vector, affinity_notify); 403 404 cpumask_copy(&q_vector->affinity_mask, mask); 405 } 406 407 /** 408 * iavf_irq_affinity_release - Callback for affinity notifier release 409 * @ref: internal core kernel usage 410 * 411 * This is a callback function used by the irq_set_affinity_notifier function 412 * to inform the current notification subscriber that they will no longer 413 * receive notifications. 414 **/ 415 static void iavf_irq_affinity_release(struct kref *ref) {} 416 417 /** 418 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 419 * @adapter: board private structure 420 * @basename: device basename 421 * 422 * Allocates MSI-X vectors for tx and rx handling, and requests 423 * interrupts from the kernel. 
424 **/ 425 static int 426 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 427 { 428 unsigned int vector, q_vectors; 429 unsigned int rx_int_idx = 0, tx_int_idx = 0; 430 int irq_num, err; 431 int cpu; 432 433 iavf_irq_disable(adapter); 434 /* Decrement for Other and TCP Timer vectors */ 435 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 436 437 for (vector = 0; vector < q_vectors; vector++) { 438 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 439 440 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 441 442 if (q_vector->tx.ring && q_vector->rx.ring) { 443 snprintf(q_vector->name, sizeof(q_vector->name), 444 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 445 tx_int_idx++; 446 } else if (q_vector->rx.ring) { 447 snprintf(q_vector->name, sizeof(q_vector->name), 448 "iavf-%s-rx-%d", basename, rx_int_idx++); 449 } else if (q_vector->tx.ring) { 450 snprintf(q_vector->name, sizeof(q_vector->name), 451 "iavf-%s-tx-%d", basename, tx_int_idx++); 452 } else { 453 /* skip this unused q_vector */ 454 continue; 455 } 456 err = request_irq(irq_num, 457 iavf_msix_clean_rings, 458 0, 459 q_vector->name, 460 q_vector); 461 if (err) { 462 dev_info(&adapter->pdev->dev, 463 "Request_irq failed, error: %d\n", err); 464 goto free_queue_irqs; 465 } 466 /* register for affinity change notifications */ 467 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 468 q_vector->affinity_notify.release = 469 iavf_irq_affinity_release; 470 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 471 /* Spread the IRQ affinity hints across online CPUs. Note that 472 * get_cpu_mask returns a mask with a permanent lifetime so 473 * it's safe to use as a hint for irq_set_affinity_hint. 474 */ 475 cpu = cpumask_local_spread(q_vector->v_idx, -1); 476 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 477 } 478 479 return 0; 480 481 free_queue_irqs: 482 while (vector) { 483 vector--; 484 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 485 irq_set_affinity_notifier(irq_num, NULL); 486 irq_set_affinity_hint(irq_num, NULL); 487 free_irq(irq_num, &adapter->q_vectors[vector]); 488 } 489 return err; 490 } 491 492 /** 493 * iavf_request_misc_irq - Initialize MSI-X interrupts 494 * @adapter: board private structure 495 * 496 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 497 * vector is only for the admin queue, and stays active even when the netdev 498 * is closed. 499 **/ 500 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 501 { 502 struct net_device *netdev = adapter->netdev; 503 int err; 504 505 snprintf(adapter->misc_vector_name, 506 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 507 dev_name(&adapter->pdev->dev)); 508 err = request_irq(adapter->msix_entries[0].vector, 509 &iavf_msix_aq, 0, 510 adapter->misc_vector_name, netdev); 511 if (err) { 512 dev_err(&adapter->pdev->dev, 513 "request_irq for %s failed: %d\n", 514 adapter->misc_vector_name, err); 515 free_irq(adapter->msix_entries[0].vector, netdev); 516 } 517 return err; 518 } 519 520 /** 521 * iavf_free_traffic_irqs - Free MSI-X interrupts 522 * @adapter: board private structure 523 * 524 * Frees all MSI-X vectors other than 0. 
525 **/ 526 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 527 { 528 int vector, irq_num, q_vectors; 529 530 if (!adapter->msix_entries) 531 return; 532 533 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 534 535 for (vector = 0; vector < q_vectors; vector++) { 536 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 537 irq_set_affinity_notifier(irq_num, NULL); 538 irq_set_affinity_hint(irq_num, NULL); 539 free_irq(irq_num, &adapter->q_vectors[vector]); 540 } 541 } 542 543 /** 544 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 545 * @adapter: board private structure 546 * 547 * Frees MSI-X vector 0. 548 **/ 549 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 550 { 551 struct net_device *netdev = adapter->netdev; 552 553 if (!adapter->msix_entries) 554 return; 555 556 free_irq(adapter->msix_entries[0].vector, netdev); 557 } 558 559 /** 560 * iavf_configure_tx - Configure Transmit Unit after Reset 561 * @adapter: board private structure 562 * 563 * Configure the Tx unit of the MAC after a reset. 564 **/ 565 static void iavf_configure_tx(struct iavf_adapter *adapter) 566 { 567 struct iavf_hw *hw = &adapter->hw; 568 int i; 569 570 for (i = 0; i < adapter->num_active_queues; i++) 571 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 572 } 573 574 /** 575 * iavf_configure_rx - Configure Receive Unit after Reset 576 * @adapter: board private structure 577 * 578 * Configure the Rx unit of the MAC after a reset. 579 **/ 580 static void iavf_configure_rx(struct iavf_adapter *adapter) 581 { 582 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 583 struct iavf_hw *hw = &adapter->hw; 584 int i; 585 586 /* Legacy Rx will always default to a 2048 buffer size. */ 587 #if (PAGE_SIZE < 8192) 588 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 589 struct net_device *netdev = adapter->netdev; 590 591 /* For jumbo frames on systems with 4K pages we have to use 592 * an order 1 page, so we might as well increase the size 593 * of our Rx buffer to make better use of the available space 594 */ 595 rx_buf_len = IAVF_RXBUFFER_3072; 596 597 /* We use a 1536 buffer size for configurations with 598 * standard Ethernet mtu. On x86 this gives us enough room 599 * for shared info and 192 bytes of padding. 600 */ 601 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 602 (netdev->mtu <= ETH_DATA_LEN)) 603 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 604 } 605 #endif 606 607 for (i = 0; i < adapter->num_active_queues; i++) { 608 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 609 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 610 611 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 612 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 613 else 614 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 615 } 616 } 617 618 /** 619 * iavf_find_vlan - Search filter list for specific vlan filter 620 * @adapter: board private structure 621 * @vlan: vlan tag 622 * 623 * Returns ptr to the filter object or NULL. Must be called while holding the 624 * mac_vlan_list_lock. 625 **/ 626 static struct 627 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 628 { 629 struct iavf_vlan_filter *f; 630 631 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 632 if (vlan == f->vlan) 633 return f; 634 } 635 return NULL; 636 } 637 638 /** 639 * iavf_add_vlan - Add a vlan filter to the list 640 * @adapter: board private structure 641 * @vlan: VLAN tag 642 * 643 * Returns ptr to the filter object or NULL when no memory available. 
644 **/ 645 static struct 646 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 647 { 648 struct iavf_vlan_filter *f = NULL; 649 650 spin_lock_bh(&adapter->mac_vlan_list_lock); 651 652 f = iavf_find_vlan(adapter, vlan); 653 if (!f) { 654 f = kzalloc(sizeof(*f), GFP_ATOMIC); 655 if (!f) 656 goto clearout; 657 658 f->vlan = vlan; 659 660 list_add_tail(&f->list, &adapter->vlan_filter_list); 661 f->add = true; 662 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 663 } 664 665 clearout: 666 spin_unlock_bh(&adapter->mac_vlan_list_lock); 667 return f; 668 } 669 670 /** 671 * iavf_del_vlan - Remove a vlan filter from the list 672 * @adapter: board private structure 673 * @vlan: VLAN tag 674 **/ 675 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 676 { 677 struct iavf_vlan_filter *f; 678 679 spin_lock_bh(&adapter->mac_vlan_list_lock); 680 681 f = iavf_find_vlan(adapter, vlan); 682 if (f) { 683 f->remove = true; 684 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 685 } 686 687 spin_unlock_bh(&adapter->mac_vlan_list_lock); 688 } 689 690 /** 691 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 692 * @netdev: network device struct 693 * @proto: unused protocol data 694 * @vid: VLAN tag 695 **/ 696 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 697 __always_unused __be16 proto, u16 vid) 698 { 699 struct iavf_adapter *adapter = netdev_priv(netdev); 700 701 if (!VLAN_ALLOWED(adapter)) 702 return -EIO; 703 if (iavf_add_vlan(adapter, vid) == NULL) 704 return -ENOMEM; 705 return 0; 706 } 707 708 /** 709 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 710 * @netdev: network device struct 711 * @proto: unused protocol data 712 * @vid: VLAN tag 713 **/ 714 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 715 __always_unused __be16 proto, u16 vid) 716 { 717 struct iavf_adapter *adapter = netdev_priv(netdev); 718 719 if (VLAN_ALLOWED(adapter)) { 720 iavf_del_vlan(adapter, vid); 721 return 0; 722 } 723 return -EIO; 724 } 725 726 /** 727 * iavf_find_filter - Search filter list for specific mac filter 728 * @adapter: board private structure 729 * @macaddr: the MAC address 730 * 731 * Returns ptr to the filter object or NULL. Must be called while holding the 732 * mac_vlan_list_lock. 733 **/ 734 static struct 735 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 736 const u8 *macaddr) 737 { 738 struct iavf_mac_filter *f; 739 740 if (!macaddr) 741 return NULL; 742 743 list_for_each_entry(f, &adapter->mac_filter_list, list) { 744 if (ether_addr_equal(macaddr, f->macaddr)) 745 return f; 746 } 747 return NULL; 748 } 749 750 /** 751 * iavf_add_filter - Add a mac filter to the filter list 752 * @adapter: board private structure 753 * @macaddr: the MAC address 754 * 755 * Returns ptr to the filter object or NULL when no memory available. 
756 **/ 757 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 758 const u8 *macaddr) 759 { 760 struct iavf_mac_filter *f; 761 762 if (!macaddr) 763 return NULL; 764 765 f = iavf_find_filter(adapter, macaddr); 766 if (!f) { 767 f = kzalloc(sizeof(*f), GFP_ATOMIC); 768 if (!f) 769 return f; 770 771 ether_addr_copy(f->macaddr, macaddr); 772 773 list_add_tail(&f->list, &adapter->mac_filter_list); 774 f->add = true; 775 f->is_new_mac = true; 776 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 777 } else { 778 f->remove = false; 779 } 780 781 return f; 782 } 783 784 /** 785 * iavf_set_mac - NDO callback to set port mac address 786 * @netdev: network interface device structure 787 * @p: pointer to an address structure 788 * 789 * Returns 0 on success, negative on failure 790 **/ 791 static int iavf_set_mac(struct net_device *netdev, void *p) 792 { 793 struct iavf_adapter *adapter = netdev_priv(netdev); 794 struct iavf_hw *hw = &adapter->hw; 795 struct iavf_mac_filter *f; 796 struct sockaddr *addr = p; 797 798 if (!is_valid_ether_addr(addr->sa_data)) 799 return -EADDRNOTAVAIL; 800 801 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 802 return 0; 803 804 spin_lock_bh(&adapter->mac_vlan_list_lock); 805 806 f = iavf_find_filter(adapter, hw->mac.addr); 807 if (f) { 808 f->remove = true; 809 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 810 } 811 812 f = iavf_add_filter(adapter, addr->sa_data); 813 814 spin_unlock_bh(&adapter->mac_vlan_list_lock); 815 816 if (f) { 817 ether_addr_copy(hw->mac.addr, addr->sa_data); 818 } 819 820 return (f == NULL) ? -ENOMEM : 0; 821 } 822 823 /** 824 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 825 * @netdev: the netdevice 826 * @addr: address to add 827 * 828 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 829 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 830 */ 831 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 832 { 833 struct iavf_adapter *adapter = netdev_priv(netdev); 834 835 if (iavf_add_filter(adapter, addr)) 836 return 0; 837 else 838 return -ENOMEM; 839 } 840 841 /** 842 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 843 * @netdev: the netdevice 844 * @addr: address to add 845 * 846 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 847 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 848 */ 849 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 850 { 851 struct iavf_adapter *adapter = netdev_priv(netdev); 852 struct iavf_mac_filter *f; 853 854 /* Under some circumstances, we might receive a request to delete 855 * our own device address from our uc list. Because we store the 856 * device address in the VSI's MAC/VLAN filter list, we need to ignore 857 * such requests and not delete our device address from this list. 
858 */ 859 if (ether_addr_equal(addr, netdev->dev_addr)) 860 return 0; 861 862 f = iavf_find_filter(adapter, addr); 863 if (f) { 864 f->remove = true; 865 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 866 } 867 return 0; 868 } 869 870 /** 871 * iavf_set_rx_mode - NDO callback to set the netdev filters 872 * @netdev: network interface device structure 873 **/ 874 static void iavf_set_rx_mode(struct net_device *netdev) 875 { 876 struct iavf_adapter *adapter = netdev_priv(netdev); 877 878 spin_lock_bh(&adapter->mac_vlan_list_lock); 879 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 880 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 881 spin_unlock_bh(&adapter->mac_vlan_list_lock); 882 883 if (netdev->flags & IFF_PROMISC && 884 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 885 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 886 else if (!(netdev->flags & IFF_PROMISC) && 887 adapter->flags & IAVF_FLAG_PROMISC_ON) 888 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 889 890 if (netdev->flags & IFF_ALLMULTI && 891 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 892 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 893 else if (!(netdev->flags & IFF_ALLMULTI) && 894 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 895 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 896 } 897 898 /** 899 * iavf_napi_enable_all - enable NAPI on all queue vectors 900 * @adapter: board private structure 901 **/ 902 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 903 { 904 int q_idx; 905 struct iavf_q_vector *q_vector; 906 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 907 908 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 909 struct napi_struct *napi; 910 911 q_vector = &adapter->q_vectors[q_idx]; 912 napi = &q_vector->napi; 913 napi_enable(napi); 914 } 915 } 916 917 /** 918 * iavf_napi_disable_all - disable NAPI on all queue vectors 919 * @adapter: board private structure 920 **/ 921 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 922 { 923 int q_idx; 924 struct iavf_q_vector *q_vector; 925 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 926 927 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 928 q_vector = &adapter->q_vectors[q_idx]; 929 napi_disable(&q_vector->napi); 930 } 931 } 932 933 /** 934 * iavf_configure - set up transmit and receive data structures 935 * @adapter: board private structure 936 **/ 937 static void iavf_configure(struct iavf_adapter *adapter) 938 { 939 struct net_device *netdev = adapter->netdev; 940 int i; 941 942 iavf_set_rx_mode(netdev); 943 944 iavf_configure_tx(adapter); 945 iavf_configure_rx(adapter); 946 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 947 948 for (i = 0; i < adapter->num_active_queues; i++) { 949 struct iavf_ring *ring = &adapter->rx_rings[i]; 950 951 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 952 } 953 } 954 955 /** 956 * iavf_up_complete - Finish the last steps of bringing up a connection 957 * @adapter: board private structure 958 * 959 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
960 **/ 961 static void iavf_up_complete(struct iavf_adapter *adapter) 962 { 963 iavf_change_state(adapter, __IAVF_RUNNING); 964 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 965 966 iavf_napi_enable_all(adapter); 967 968 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 969 if (CLIENT_ENABLED(adapter)) 970 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 971 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 972 } 973 974 /** 975 * iavf_down - Shutdown the connection processing 976 * @adapter: board private structure 977 * 978 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 979 **/ 980 void iavf_down(struct iavf_adapter *adapter) 981 { 982 struct net_device *netdev = adapter->netdev; 983 struct iavf_vlan_filter *vlf; 984 struct iavf_cloud_filter *cf; 985 struct iavf_fdir_fltr *fdir; 986 struct iavf_mac_filter *f; 987 struct iavf_adv_rss *rss; 988 989 if (adapter->state <= __IAVF_DOWN_PENDING) 990 return; 991 992 netif_carrier_off(netdev); 993 netif_tx_disable(netdev); 994 adapter->link_up = false; 995 iavf_napi_disable_all(adapter); 996 iavf_irq_disable(adapter); 997 998 spin_lock_bh(&adapter->mac_vlan_list_lock); 999 1000 /* clear the sync flag on all filters */ 1001 __dev_uc_unsync(adapter->netdev, NULL); 1002 __dev_mc_unsync(adapter->netdev, NULL); 1003 1004 /* remove all MAC filters */ 1005 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1006 f->remove = true; 1007 } 1008 1009 /* remove all VLAN filters */ 1010 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1011 vlf->remove = true; 1012 } 1013 1014 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1015 1016 /* remove all cloud filters */ 1017 spin_lock_bh(&adapter->cloud_filter_list_lock); 1018 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1019 cf->del = true; 1020 } 1021 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1022 1023 /* remove all Flow Director filters */ 1024 spin_lock_bh(&adapter->fdir_fltr_lock); 1025 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1026 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1027 } 1028 spin_unlock_bh(&adapter->fdir_fltr_lock); 1029 1030 /* remove all advance RSS configuration */ 1031 spin_lock_bh(&adapter->adv_rss_lock); 1032 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1033 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1034 spin_unlock_bh(&adapter->adv_rss_lock); 1035 1036 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1037 adapter->state != __IAVF_RESETTING) { 1038 /* cancel any current operation */ 1039 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1040 /* Schedule operations to close down the HW. Don't wait 1041 * here for this to complete. The watchdog is still running 1042 * and it will take care of this. 1043 */ 1044 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1045 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1046 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1047 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1048 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1049 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1050 } 1051 1052 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1053 } 1054 1055 /** 1056 * iavf_acquire_msix_vectors - Setup the MSIX capability 1057 * @adapter: board private structure 1058 * @vectors: number of vectors to request 1059 * 1060 * Work with the OS to set up the MSIX vectors needed. 
1061 * 1062 * Returns 0 on success, negative on failure 1063 **/ 1064 static int 1065 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1066 { 1067 int err, vector_threshold; 1068 1069 /* We'll want at least 3 (vector_threshold): 1070 * 0) Other (Admin Queue and link, mostly) 1071 * 1) TxQ[0] Cleanup 1072 * 2) RxQ[0] Cleanup 1073 */ 1074 vector_threshold = MIN_MSIX_COUNT; 1075 1076 /* The more we get, the more we will assign to Tx/Rx Cleanup 1077 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1078 * Right now, we simply care about how many we'll get; we'll 1079 * set them up later while requesting irq's. 1080 */ 1081 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1082 vector_threshold, vectors); 1083 if (err < 0) { 1084 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1085 kfree(adapter->msix_entries); 1086 adapter->msix_entries = NULL; 1087 return err; 1088 } 1089 1090 /* Adjust for only the vectors we'll use, which is minimum 1091 * of max_msix_q_vectors + NONQ_VECS, or the number of 1092 * vectors we were allocated. 1093 */ 1094 adapter->num_msix_vectors = err; 1095 return 0; 1096 } 1097 1098 /** 1099 * iavf_free_queues - Free memory for all rings 1100 * @adapter: board private structure to initialize 1101 * 1102 * Free all of the memory associated with queue pairs. 1103 **/ 1104 static void iavf_free_queues(struct iavf_adapter *adapter) 1105 { 1106 if (!adapter->vsi_res) 1107 return; 1108 adapter->num_active_queues = 0; 1109 kfree(adapter->tx_rings); 1110 adapter->tx_rings = NULL; 1111 kfree(adapter->rx_rings); 1112 adapter->rx_rings = NULL; 1113 } 1114 1115 /** 1116 * iavf_alloc_queues - Allocate memory for all rings 1117 * @adapter: board private structure to initialize 1118 * 1119 * We allocate one ring per queue at run-time since we don't know the 1120 * number of queues at compile-time. The polling_netdev array is 1121 * intended for Multiqueue, but should work fine with a single queue. 1122 **/ 1123 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1124 { 1125 int i, num_active_queues; 1126 1127 /* If we're in reset reallocating queues we don't actually know yet for 1128 * certain the PF gave us the number of queues we asked for but we'll 1129 * assume it did. Once basic reset is finished we'll confirm once we 1130 * start negotiating config with PF. 
1131 */ 1132 if (adapter->num_req_queues) 1133 num_active_queues = adapter->num_req_queues; 1134 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1135 adapter->num_tc) 1136 num_active_queues = adapter->ch_config.total_qps; 1137 else 1138 num_active_queues = min_t(int, 1139 adapter->vsi_res->num_queue_pairs, 1140 (int)(num_online_cpus())); 1141 1142 1143 adapter->tx_rings = kcalloc(num_active_queues, 1144 sizeof(struct iavf_ring), GFP_KERNEL); 1145 if (!adapter->tx_rings) 1146 goto err_out; 1147 adapter->rx_rings = kcalloc(num_active_queues, 1148 sizeof(struct iavf_ring), GFP_KERNEL); 1149 if (!adapter->rx_rings) 1150 goto err_out; 1151 1152 for (i = 0; i < num_active_queues; i++) { 1153 struct iavf_ring *tx_ring; 1154 struct iavf_ring *rx_ring; 1155 1156 tx_ring = &adapter->tx_rings[i]; 1157 1158 tx_ring->queue_index = i; 1159 tx_ring->netdev = adapter->netdev; 1160 tx_ring->dev = &adapter->pdev->dev; 1161 tx_ring->count = adapter->tx_desc_count; 1162 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1163 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1164 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1165 1166 rx_ring = &adapter->rx_rings[i]; 1167 rx_ring->queue_index = i; 1168 rx_ring->netdev = adapter->netdev; 1169 rx_ring->dev = &adapter->pdev->dev; 1170 rx_ring->count = adapter->rx_desc_count; 1171 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1172 } 1173 1174 adapter->num_active_queues = num_active_queues; 1175 1176 return 0; 1177 1178 err_out: 1179 iavf_free_queues(adapter); 1180 return -ENOMEM; 1181 } 1182 1183 /** 1184 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1185 * @adapter: board private structure to initialize 1186 * 1187 * Attempt to configure the interrupts using the best available 1188 * capabilities of the hardware and the kernel. 1189 **/ 1190 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1191 { 1192 int vector, v_budget; 1193 int pairs = 0; 1194 int err = 0; 1195 1196 if (!adapter->vsi_res) { 1197 err = -EIO; 1198 goto out; 1199 } 1200 pairs = adapter->num_active_queues; 1201 1202 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1203 * us much good if we have more vectors than CPUs. However, we already 1204 * limit the total number of queues by the number of CPUs so we do not 1205 * need any further limiting here. 
1206 */ 1207 v_budget = min_t(int, pairs + NONQ_VECS, 1208 (int)adapter->vf_res->max_vectors); 1209 1210 adapter->msix_entries = kcalloc(v_budget, 1211 sizeof(struct msix_entry), GFP_KERNEL); 1212 if (!adapter->msix_entries) { 1213 err = -ENOMEM; 1214 goto out; 1215 } 1216 1217 for (vector = 0; vector < v_budget; vector++) 1218 adapter->msix_entries[vector].entry = vector; 1219 1220 err = iavf_acquire_msix_vectors(adapter, v_budget); 1221 1222 out: 1223 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1224 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1225 return err; 1226 } 1227 1228 /** 1229 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1230 * @adapter: board private structure 1231 * 1232 * Return 0 on success, negative on failure 1233 **/ 1234 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1235 { 1236 struct iavf_aqc_get_set_rss_key_data *rss_key = 1237 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1238 struct iavf_hw *hw = &adapter->hw; 1239 int ret = 0; 1240 1241 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1242 /* bail because we already have a command pending */ 1243 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1244 adapter->current_op); 1245 return -EBUSY; 1246 } 1247 1248 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1249 if (ret) { 1250 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1251 iavf_stat_str(hw, ret), 1252 iavf_aq_str(hw, hw->aq.asq_last_status)); 1253 return ret; 1254 1255 } 1256 1257 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1258 adapter->rss_lut, adapter->rss_lut_size); 1259 if (ret) { 1260 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1261 iavf_stat_str(hw, ret), 1262 iavf_aq_str(hw, hw->aq.asq_last_status)); 1263 } 1264 1265 return ret; 1266 1267 } 1268 1269 /** 1270 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1271 * @adapter: board private structure 1272 * 1273 * Returns 0 on success, negative on failure 1274 **/ 1275 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1276 { 1277 struct iavf_hw *hw = &adapter->hw; 1278 u32 *dw; 1279 u16 i; 1280 1281 dw = (u32 *)adapter->rss_key; 1282 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1283 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1284 1285 dw = (u32 *)adapter->rss_lut; 1286 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1287 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1288 1289 iavf_flush(hw); 1290 1291 return 0; 1292 } 1293 1294 /** 1295 * iavf_config_rss - Configure RSS keys and lut 1296 * @adapter: board private structure 1297 * 1298 * Returns 0 on success, negative on failure 1299 **/ 1300 int iavf_config_rss(struct iavf_adapter *adapter) 1301 { 1302 1303 if (RSS_PF(adapter)) { 1304 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1305 IAVF_FLAG_AQ_SET_RSS_KEY; 1306 return 0; 1307 } else if (RSS_AQ(adapter)) { 1308 return iavf_config_rss_aq(adapter); 1309 } else { 1310 return iavf_config_rss_reg(adapter); 1311 } 1312 } 1313 1314 /** 1315 * iavf_fill_rss_lut - Fill the lut with default values 1316 * @adapter: board private structure 1317 **/ 1318 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1319 { 1320 u16 i; 1321 1322 for (i = 0; i < adapter->rss_lut_size; i++) 1323 adapter->rss_lut[i] = i % adapter->num_active_queues; 1324 } 1325 1326 /** 1327 * iavf_init_rss - Prepare for RSS 1328 * @adapter: board private structure 1329 * 1330 * Return 0 on success, negative on failure 1331 **/ 1332 static int 
iavf_init_rss(struct iavf_adapter *adapter) 1333 { 1334 struct iavf_hw *hw = &adapter->hw; 1335 int ret; 1336 1337 if (!RSS_PF(adapter)) { 1338 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1339 if (adapter->vf_res->vf_cap_flags & 1340 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1341 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1342 else 1343 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1344 1345 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1346 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1347 } 1348 1349 iavf_fill_rss_lut(adapter); 1350 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1351 ret = iavf_config_rss(adapter); 1352 1353 return ret; 1354 } 1355 1356 /** 1357 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1358 * @adapter: board private structure to initialize 1359 * 1360 * We allocate one q_vector per queue interrupt. If allocation fails we 1361 * return -ENOMEM. 1362 **/ 1363 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1364 { 1365 int q_idx = 0, num_q_vectors; 1366 struct iavf_q_vector *q_vector; 1367 1368 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1369 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1370 GFP_KERNEL); 1371 if (!adapter->q_vectors) 1372 return -ENOMEM; 1373 1374 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1375 q_vector = &adapter->q_vectors[q_idx]; 1376 q_vector->adapter = adapter; 1377 q_vector->vsi = &adapter->vsi; 1378 q_vector->v_idx = q_idx; 1379 q_vector->reg_idx = q_idx; 1380 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1381 netif_napi_add(adapter->netdev, &q_vector->napi, 1382 iavf_napi_poll, NAPI_POLL_WEIGHT); 1383 } 1384 1385 return 0; 1386 } 1387 1388 /** 1389 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1390 * @adapter: board private structure to initialize 1391 * 1392 * This function frees the memory allocated to the q_vectors. In addition if 1393 * NAPI is enabled it will delete any references to the NAPI struct prior 1394 * to freeing the q_vector. 
1395 **/ 1396 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1397 { 1398 int q_idx, num_q_vectors; 1399 int napi_vectors; 1400 1401 if (!adapter->q_vectors) 1402 return; 1403 1404 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1405 napi_vectors = adapter->num_active_queues; 1406 1407 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1408 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1409 1410 if (q_idx < napi_vectors) 1411 netif_napi_del(&q_vector->napi); 1412 } 1413 kfree(adapter->q_vectors); 1414 adapter->q_vectors = NULL; 1415 } 1416 1417 /** 1418 * iavf_reset_interrupt_capability - Reset MSIX setup 1419 * @adapter: board private structure 1420 * 1421 **/ 1422 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1423 { 1424 if (!adapter->msix_entries) 1425 return; 1426 1427 pci_disable_msix(adapter->pdev); 1428 kfree(adapter->msix_entries); 1429 adapter->msix_entries = NULL; 1430 } 1431 1432 /** 1433 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1434 * @adapter: board private structure to initialize 1435 * 1436 **/ 1437 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1438 { 1439 int err; 1440 1441 err = iavf_alloc_queues(adapter); 1442 if (err) { 1443 dev_err(&adapter->pdev->dev, 1444 "Unable to allocate memory for queues\n"); 1445 goto err_alloc_queues; 1446 } 1447 1448 rtnl_lock(); 1449 err = iavf_set_interrupt_capability(adapter); 1450 rtnl_unlock(); 1451 if (err) { 1452 dev_err(&adapter->pdev->dev, 1453 "Unable to setup interrupt capabilities\n"); 1454 goto err_set_interrupt; 1455 } 1456 1457 err = iavf_alloc_q_vectors(adapter); 1458 if (err) { 1459 dev_err(&adapter->pdev->dev, 1460 "Unable to allocate memory for queue vectors\n"); 1461 goto err_alloc_q_vectors; 1462 } 1463 1464 /* If we've made it so far while ADq flag being ON, then we haven't 1465 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1466 * resources have been allocated in the reset path. 1467 * Now we can truly claim that ADq is enabled. 1468 */ 1469 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1470 adapter->num_tc) 1471 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1472 adapter->num_tc); 1473 1474 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1475 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1476 adapter->num_active_queues); 1477 1478 return 0; 1479 err_alloc_q_vectors: 1480 iavf_reset_interrupt_capability(adapter); 1481 err_set_interrupt: 1482 iavf_free_queues(adapter); 1483 err_alloc_queues: 1484 return err; 1485 } 1486 1487 /** 1488 * iavf_free_rss - Free memory used by RSS structs 1489 * @adapter: board private structure 1490 **/ 1491 static void iavf_free_rss(struct iavf_adapter *adapter) 1492 { 1493 kfree(adapter->rss_key); 1494 adapter->rss_key = NULL; 1495 1496 kfree(adapter->rss_lut); 1497 adapter->rss_lut = NULL; 1498 } 1499 1500 /** 1501 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1502 * @adapter: board private structure 1503 * 1504 * Returns 0 on success, negative on failure 1505 **/ 1506 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1507 { 1508 struct net_device *netdev = adapter->netdev; 1509 int err; 1510 1511 if (netif_running(netdev)) 1512 iavf_free_traffic_irqs(adapter); 1513 iavf_free_misc_irq(adapter); 1514 iavf_reset_interrupt_capability(adapter); 1515 iavf_free_q_vectors(adapter); 1516 iavf_free_queues(adapter); 1517 1518 err = iavf_init_interrupt_scheme(adapter); 1519 if (err) 1520 goto err; 1521 1522 netif_tx_stop_all_queues(netdev); 1523 1524 err = iavf_request_misc_irq(adapter); 1525 if (err) 1526 goto err; 1527 1528 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1529 1530 iavf_map_rings_to_vectors(adapter); 1531 err: 1532 return err; 1533 } 1534 1535 /** 1536 * iavf_process_aq_command - process aq_required flags 1537 * and sends aq command 1538 * @adapter: pointer to iavf adapter structure 1539 * 1540 * Returns 0 on success 1541 * Returns error code if no command was sent 1542 * or error code if the command failed. 1543 **/ 1544 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1545 { 1546 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1547 return iavf_send_vf_config_msg(adapter); 1548 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1549 iavf_disable_queues(adapter); 1550 return 0; 1551 } 1552 1553 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1554 iavf_map_queues(adapter); 1555 return 0; 1556 } 1557 1558 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1559 iavf_add_ether_addrs(adapter); 1560 return 0; 1561 } 1562 1563 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1564 iavf_add_vlans(adapter); 1565 return 0; 1566 } 1567 1568 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1569 iavf_del_ether_addrs(adapter); 1570 return 0; 1571 } 1572 1573 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1574 iavf_del_vlans(adapter); 1575 return 0; 1576 } 1577 1578 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1579 iavf_enable_vlan_stripping(adapter); 1580 return 0; 1581 } 1582 1583 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1584 iavf_disable_vlan_stripping(adapter); 1585 return 0; 1586 } 1587 1588 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1589 iavf_configure_queues(adapter); 1590 return 0; 1591 } 1592 1593 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1594 iavf_enable_queues(adapter); 1595 return 0; 1596 } 1597 1598 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1599 /* This message goes straight to the firmware, not the 1600 * PF, so we don't have to set current_op as we will 1601 * not get a response through the ARQ. 
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	return -EAGAIN;
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_STARTUP driver state.
1690 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1691 * when fails the state is changed to __IAVF_INIT_FAILED 1692 **/ 1693 static void iavf_startup(struct iavf_adapter *adapter) 1694 { 1695 struct pci_dev *pdev = adapter->pdev; 1696 struct iavf_hw *hw = &adapter->hw; 1697 int err; 1698 1699 WARN_ON(adapter->state != __IAVF_STARTUP); 1700 1701 /* driver loaded, probe complete */ 1702 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1703 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1704 err = iavf_set_mac_type(hw); 1705 if (err) { 1706 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1707 goto err; 1708 } 1709 1710 err = iavf_check_reset_complete(hw); 1711 if (err) { 1712 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1713 err); 1714 goto err; 1715 } 1716 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1717 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1718 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1719 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1720 1721 err = iavf_init_adminq(hw); 1722 if (err) { 1723 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1724 goto err; 1725 } 1726 err = iavf_send_api_ver(adapter); 1727 if (err) { 1728 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1729 iavf_shutdown_adminq(hw); 1730 goto err; 1731 } 1732 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 1733 return; 1734 err: 1735 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1736 } 1737 1738 /** 1739 * iavf_init_version_check - second step of driver startup 1740 * @adapter: board private structure 1741 * 1742 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1743 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1744 * when fails the state is changed to __IAVF_INIT_FAILED 1745 **/ 1746 static void iavf_init_version_check(struct iavf_adapter *adapter) 1747 { 1748 struct pci_dev *pdev = adapter->pdev; 1749 struct iavf_hw *hw = &adapter->hw; 1750 int err = -EAGAIN; 1751 1752 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1753 1754 if (!iavf_asq_done(hw)) { 1755 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1756 iavf_shutdown_adminq(hw); 1757 iavf_change_state(adapter, __IAVF_STARTUP); 1758 goto err; 1759 } 1760 1761 /* aq msg sent, awaiting reply */ 1762 err = iavf_verify_api_ver(adapter); 1763 if (err) { 1764 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1765 err = iavf_send_api_ver(adapter); 1766 else 1767 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1768 adapter->pf_version.major, 1769 adapter->pf_version.minor, 1770 VIRTCHNL_VERSION_MAJOR, 1771 VIRTCHNL_VERSION_MINOR); 1772 goto err; 1773 } 1774 err = iavf_send_vf_config_msg(adapter); 1775 if (err) { 1776 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1777 err); 1778 goto err; 1779 } 1780 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 1781 return; 1782 err: 1783 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1784 } 1785 1786 /** 1787 * iavf_init_get_resources - third step of driver startup 1788 * @adapter: board private structure 1789 * 1790 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1791 * finishes driver initialization procedure. 
1792 * When success the state is changed to __IAVF_DOWN 1793 * when fails the state is changed to __IAVF_INIT_FAILED 1794 **/ 1795 static void iavf_init_get_resources(struct iavf_adapter *adapter) 1796 { 1797 struct net_device *netdev = adapter->netdev; 1798 struct pci_dev *pdev = adapter->pdev; 1799 struct iavf_hw *hw = &adapter->hw; 1800 int err; 1801 1802 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1803 /* aq msg sent, awaiting reply */ 1804 if (!adapter->vf_res) { 1805 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1806 GFP_KERNEL); 1807 if (!adapter->vf_res) { 1808 err = -ENOMEM; 1809 goto err; 1810 } 1811 } 1812 err = iavf_get_vf_config(adapter); 1813 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1814 err = iavf_send_vf_config_msg(adapter); 1815 goto err; 1816 } else if (err == IAVF_ERR_PARAM) { 1817 /* We only get ERR_PARAM if the device is in a very bad 1818 * state or if we've been disabled for previous bad 1819 * behavior. Either way, we're done now. 1820 */ 1821 iavf_shutdown_adminq(hw); 1822 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1823 return; 1824 } 1825 if (err) { 1826 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1827 goto err_alloc; 1828 } 1829 1830 err = iavf_process_config(adapter); 1831 if (err) 1832 goto err_alloc; 1833 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1834 1835 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1836 1837 netdev->netdev_ops = &iavf_netdev_ops; 1838 iavf_set_ethtool_ops(netdev); 1839 netdev->watchdog_timeo = 5 * HZ; 1840 1841 /* MTU range: 68 - 9710 */ 1842 netdev->min_mtu = ETH_MIN_MTU; 1843 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1844 1845 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1846 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1847 adapter->hw.mac.addr); 1848 eth_hw_addr_random(netdev); 1849 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1850 } else { 1851 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 1852 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1853 } 1854 1855 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1856 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1857 err = iavf_init_interrupt_scheme(adapter); 1858 if (err) 1859 goto err_sw_init; 1860 iavf_map_rings_to_vectors(adapter); 1861 if (adapter->vf_res->vf_cap_flags & 1862 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1863 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1864 1865 err = iavf_request_misc_irq(adapter); 1866 if (err) 1867 goto err_sw_init; 1868 1869 netif_carrier_off(netdev); 1870 adapter->link_up = false; 1871 1872 /* set the semaphore to prevent any callbacks after device registration 1873 * up to time when state of driver will be set to __IAVF_DOWN 1874 */ 1875 rtnl_lock(); 1876 if (!adapter->netdev_registered) { 1877 err = register_netdevice(netdev); 1878 if (err) { 1879 rtnl_unlock(); 1880 goto err_register; 1881 } 1882 } 1883 1884 adapter->netdev_registered = true; 1885 1886 netif_tx_stop_all_queues(netdev); 1887 if (CLIENT_ALLOWED(adapter)) { 1888 err = iavf_lan_add_device(adapter); 1889 if (err) 1890 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1891 err); 1892 } 1893 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1894 if (netdev->features & NETIF_F_GRO) 1895 dev_info(&pdev->dev, "GRO is enabled\n"); 1896 1897 iavf_change_state(adapter, __IAVF_DOWN); 1898 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1899 rtnl_unlock(); 1900 1901 iavf_misc_irq_enable(adapter); 1902 
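	/* Wake any thread waiting on down_waitqueue (e.g. a pending
	 * iavf_close()) so it can observe the __IAVF_DOWN state set above.
	 */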
wake_up(&adapter->down_waitqueue); 1903 1904 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1905 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1906 if (!adapter->rss_key || !adapter->rss_lut) { 1907 err = -ENOMEM; 1908 goto err_mem; 1909 } 1910 if (RSS_AQ(adapter)) 1911 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1912 else 1913 iavf_init_rss(adapter); 1914 1915 return; 1916 err_mem: 1917 iavf_free_rss(adapter); 1918 err_register: 1919 iavf_free_misc_irq(adapter); 1920 err_sw_init: 1921 iavf_reset_interrupt_capability(adapter); 1922 err_alloc: 1923 kfree(adapter->vf_res); 1924 adapter->vf_res = NULL; 1925 err: 1926 iavf_change_state(adapter, __IAVF_INIT_FAILED); 1927 } 1928 1929 /** 1930 * iavf_watchdog_task - Periodic call-back task 1931 * @work: pointer to work_struct 1932 **/ 1933 static void iavf_watchdog_task(struct work_struct *work) 1934 { 1935 struct iavf_adapter *adapter = container_of(work, 1936 struct iavf_adapter, 1937 watchdog_task.work); 1938 struct iavf_hw *hw = &adapter->hw; 1939 u32 reg_val; 1940 1941 if (!mutex_trylock(&adapter->crit_lock)) 1942 goto restart_watchdog; 1943 1944 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1945 iavf_change_state(adapter, __IAVF_COMM_FAILED); 1946 1947 if (adapter->flags & IAVF_FLAG_RESET_NEEDED && 1948 adapter->state != __IAVF_RESETTING) { 1949 iavf_change_state(adapter, __IAVF_RESETTING); 1950 adapter->aq_required = 0; 1951 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1952 } 1953 1954 switch (adapter->state) { 1955 case __IAVF_STARTUP: 1956 iavf_startup(adapter); 1957 mutex_unlock(&adapter->crit_lock); 1958 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1959 msecs_to_jiffies(30)); 1960 return; 1961 case __IAVF_INIT_VERSION_CHECK: 1962 iavf_init_version_check(adapter); 1963 mutex_unlock(&adapter->crit_lock); 1964 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1965 msecs_to_jiffies(30)); 1966 return; 1967 case __IAVF_INIT_GET_RESOURCES: 1968 iavf_init_get_resources(adapter); 1969 mutex_unlock(&adapter->crit_lock); 1970 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1971 msecs_to_jiffies(1)); 1972 return; 1973 case __IAVF_INIT_FAILED: 1974 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 1975 dev_err(&adapter->pdev->dev, 1976 "Failed to communicate with PF; waiting before retry\n"); 1977 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 1978 iavf_shutdown_adminq(hw); 1979 mutex_unlock(&adapter->crit_lock); 1980 queue_delayed_work(iavf_wq, 1981 &adapter->watchdog_task, (5 * HZ)); 1982 return; 1983 } 1984 /* Try again from failed step*/ 1985 iavf_change_state(adapter, adapter->last_state); 1986 mutex_unlock(&adapter->crit_lock); 1987 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 1988 return; 1989 case __IAVF_COMM_FAILED: 1990 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1991 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1992 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1993 reg_val == VIRTCHNL_VFR_COMPLETED) { 1994 /* A chance for redemption! */ 1995 dev_err(&adapter->pdev->dev, 1996 "Hardware came out of reset. Attempting reinit.\n"); 1997 /* When init task contacts the PF and 1998 * gets everything set up again, it'll restart the 1999 * watchdog for us. Down, boy. Sit. Stay. Woof. 
2000 */ 2001 iavf_change_state(adapter, __IAVF_STARTUP); 2002 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2003 } 2004 adapter->aq_required = 0; 2005 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2006 queue_delayed_work(iavf_wq, 2007 &adapter->watchdog_task, 2008 msecs_to_jiffies(10)); 2009 return; 2010 case __IAVF_RESETTING: 2011 mutex_unlock(&adapter->crit_lock); 2012 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2013 return; 2014 case __IAVF_DOWN: 2015 case __IAVF_DOWN_PENDING: 2016 case __IAVF_TESTING: 2017 case __IAVF_RUNNING: 2018 if (adapter->current_op) { 2019 if (!iavf_asq_done(hw)) { 2020 dev_dbg(&adapter->pdev->dev, 2021 "Admin queue timeout\n"); 2022 iavf_send_api_ver(adapter); 2023 } 2024 } else { 2025 /* An error will be returned if no commands were 2026 * processed; use this opportunity to update stats 2027 */ 2028 if (iavf_process_aq_command(adapter) && 2029 adapter->state == __IAVF_RUNNING) 2030 iavf_request_stats(adapter); 2031 } 2032 if (adapter->state == __IAVF_RUNNING) 2033 iavf_detect_recover_hung(&adapter->vsi); 2034 break; 2035 case __IAVF_REMOVE: 2036 mutex_unlock(&adapter->crit_lock); 2037 return; 2038 default: 2039 return; 2040 } 2041 2042 /* check for hw reset */ 2043 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2044 if (!reg_val) { 2045 iavf_change_state(adapter, __IAVF_RESETTING); 2046 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2047 adapter->aq_required = 0; 2048 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2049 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2050 queue_work(iavf_wq, &adapter->reset_task); 2051 mutex_unlock(&adapter->crit_lock); 2052 queue_delayed_work(iavf_wq, 2053 &adapter->watchdog_task, HZ * 2); 2054 return; 2055 } 2056 2057 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2058 mutex_unlock(&adapter->crit_lock); 2059 restart_watchdog: 2060 queue_work(iavf_wq, &adapter->adminq_task); 2061 if (adapter->aq_required) 2062 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2063 msecs_to_jiffies(20)); 2064 else 2065 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2066 } 2067 2068 static void iavf_disable_vf(struct iavf_adapter *adapter) 2069 { 2070 struct iavf_mac_filter *f, *ftmp; 2071 struct iavf_vlan_filter *fv, *fvtmp; 2072 struct iavf_cloud_filter *cf, *cftmp; 2073 2074 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2075 2076 /* We don't use netif_running() because it may be true prior to 2077 * ndo_open() returning, so we can't assume it means all our open 2078 * tasks have finished, since we're not holding the rtnl_lock here. 
2079 */ 2080 if (adapter->state == __IAVF_RUNNING) { 2081 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2082 netif_carrier_off(adapter->netdev); 2083 netif_tx_disable(adapter->netdev); 2084 adapter->link_up = false; 2085 iavf_napi_disable_all(adapter); 2086 iavf_irq_disable(adapter); 2087 iavf_free_traffic_irqs(adapter); 2088 iavf_free_all_tx_resources(adapter); 2089 iavf_free_all_rx_resources(adapter); 2090 } 2091 2092 spin_lock_bh(&adapter->mac_vlan_list_lock); 2093 2094 /* Delete all of the filters */ 2095 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2096 list_del(&f->list); 2097 kfree(f); 2098 } 2099 2100 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2101 list_del(&fv->list); 2102 kfree(fv); 2103 } 2104 2105 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2106 2107 spin_lock_bh(&adapter->cloud_filter_list_lock); 2108 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2109 list_del(&cf->list); 2110 kfree(cf); 2111 adapter->num_cloud_filters--; 2112 } 2113 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2114 2115 iavf_free_misc_irq(adapter); 2116 iavf_reset_interrupt_capability(adapter); 2117 iavf_free_queues(adapter); 2118 iavf_free_q_vectors(adapter); 2119 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2120 iavf_shutdown_adminq(&adapter->hw); 2121 adapter->netdev->flags &= ~IFF_UP; 2122 mutex_unlock(&adapter->crit_lock); 2123 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2124 iavf_change_state(adapter, __IAVF_DOWN); 2125 wake_up(&adapter->down_waitqueue); 2126 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2127 } 2128 2129 /** 2130 * iavf_reset_task - Call-back task to handle hardware reset 2131 * @work: pointer to work_struct 2132 * 2133 * During reset we need to shut down and reinitialize the admin queue 2134 * before we can use it to communicate with the PF again. We also clear 2135 * and reinit the rings because that context is lost as well. 2136 **/ 2137 static void iavf_reset_task(struct work_struct *work) 2138 { 2139 struct iavf_adapter *adapter = container_of(work, 2140 struct iavf_adapter, 2141 reset_task); 2142 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2143 struct net_device *netdev = adapter->netdev; 2144 struct iavf_hw *hw = &adapter->hw; 2145 struct iavf_mac_filter *f, *ftmp; 2146 struct iavf_vlan_filter *vlf; 2147 struct iavf_cloud_filter *cf; 2148 u32 reg_val; 2149 int i = 0, err; 2150 bool running; 2151 2152 /* When device is being removed it doesn't make sense to run the reset 2153 * task, just return in such a case. 2154 */ 2155 if (mutex_is_locked(&adapter->remove_lock)) 2156 return; 2157 2158 if (iavf_lock_timeout(&adapter->crit_lock, 200)) { 2159 schedule_work(&adapter->reset_task); 2160 return; 2161 } 2162 while (!mutex_trylock(&adapter->client_lock)) 2163 usleep_range(500, 1000); 2164 if (CLIENT_ENABLED(adapter)) { 2165 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2166 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2167 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2168 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2169 cancel_delayed_work_sync(&adapter->client_task); 2170 iavf_notify_client_close(&adapter->vsi, true); 2171 } 2172 iavf_misc_irq_disable(adapter); 2173 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2174 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2175 /* Restart the AQ here. If we have been reset but didn't 2176 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2177 */ 2178 iavf_shutdown_adminq(hw); 2179 iavf_init_adminq(hw); 2180 iavf_request_reset(adapter); 2181 } 2182 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2183 2184 /* poll until we see the reset actually happen */ 2185 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2186 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2187 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2188 if (!reg_val) 2189 break; 2190 usleep_range(5000, 10000); 2191 } 2192 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2193 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2194 goto continue_reset; /* act like the reset happened */ 2195 } 2196 2197 /* wait until the reset is complete and the PF is responding to us */ 2198 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2199 /* sleep first to make sure a minimum wait time is met */ 2200 msleep(IAVF_RESET_WAIT_MS); 2201 2202 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2203 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2204 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2205 break; 2206 } 2207 2208 pci_set_master(adapter->pdev); 2209 2210 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2211 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2212 reg_val); 2213 iavf_disable_vf(adapter); 2214 mutex_unlock(&adapter->client_lock); 2215 return; /* Do not attempt to reinit. It's dead, Jim. */ 2216 } 2217 2218 continue_reset: 2219 /* We don't use netif_running() because it may be true prior to 2220 * ndo_open() returning, so we can't assume it means all our open 2221 * tasks have finished, since we're not holding the rtnl_lock here. 2222 */ 2223 running = ((adapter->state == __IAVF_RUNNING) || 2224 (adapter->state == __IAVF_RESETTING)); 2225 2226 if (running) { 2227 netif_carrier_off(netdev); 2228 netif_tx_stop_all_queues(netdev); 2229 adapter->link_up = false; 2230 iavf_napi_disable_all(adapter); 2231 } 2232 iavf_irq_disable(adapter); 2233 2234 iavf_change_state(adapter, __IAVF_RESETTING); 2235 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2236 2237 /* free the Tx/Rx rings and descriptors, might be better to just 2238 * re-use them sometime in the future 2239 */ 2240 iavf_free_all_rx_resources(adapter); 2241 iavf_free_all_tx_resources(adapter); 2242 2243 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2244 /* kill and reinit the admin queue */ 2245 iavf_shutdown_adminq(hw); 2246 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2247 err = iavf_init_adminq(hw); 2248 if (err) 2249 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2250 err); 2251 adapter->aq_required = 0; 2252 2253 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2254 err = iavf_reinit_interrupt_scheme(adapter); 2255 if (err) 2256 goto reset_err; 2257 } 2258 2259 if (RSS_AQ(adapter)) { 2260 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2261 } else { 2262 err = iavf_init_rss(adapter); 2263 if (err) 2264 goto reset_err; 2265 } 2266 2267 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2268 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2269 2270 spin_lock_bh(&adapter->mac_vlan_list_lock); 2271 2272 /* Delete filter for the current MAC address, it could have 2273 * been changed by the PF via administratively set MAC. 2274 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2275 */ 2276 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2277 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2278 list_del(&f->list); 2279 kfree(f); 2280 } 2281 } 2282 /* re-add all MAC filters */ 2283 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2284 f->add = true; 2285 } 2286 /* re-add all VLAN filters */ 2287 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2288 vlf->add = true; 2289 } 2290 2291 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2292 2293 /* check if TCs are running and re-add all cloud filters */ 2294 spin_lock_bh(&adapter->cloud_filter_list_lock); 2295 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2296 adapter->num_tc) { 2297 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2298 cf->add = true; 2299 } 2300 } 2301 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2302 2303 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2304 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2305 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2306 iavf_misc_irq_enable(adapter); 2307 2308 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2309 2310 /* We were running when the reset started, so we need to restore some 2311 * state here. 2312 */ 2313 if (running) { 2314 /* allocate transmit descriptors */ 2315 err = iavf_setup_all_tx_resources(adapter); 2316 if (err) 2317 goto reset_err; 2318 2319 /* allocate receive descriptors */ 2320 err = iavf_setup_all_rx_resources(adapter); 2321 if (err) 2322 goto reset_err; 2323 2324 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2325 err = iavf_request_traffic_irqs(adapter, netdev->name); 2326 if (err) 2327 goto reset_err; 2328 2329 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2330 } 2331 2332 iavf_configure(adapter); 2333 2334 /* iavf_up_complete() will switch device back 2335 * to __IAVF_RUNNING 2336 */ 2337 iavf_up_complete(adapter); 2338 2339 iavf_irq_enable(adapter, true); 2340 } else { 2341 iavf_change_state(adapter, __IAVF_DOWN); 2342 wake_up(&adapter->down_waitqueue); 2343 } 2344 mutex_unlock(&adapter->client_lock); 2345 mutex_unlock(&adapter->crit_lock); 2346 2347 return; 2348 reset_err: 2349 mutex_unlock(&adapter->client_lock); 2350 mutex_unlock(&adapter->crit_lock); 2351 if (running) 2352 iavf_change_state(adapter, __IAVF_RUNNING); 2353 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2354 iavf_close(netdev); 2355 } 2356 2357 /** 2358 * iavf_adminq_task - worker thread to clean the admin queue 2359 * @work: pointer to work_struct containing our data 2360 **/ 2361 static void iavf_adminq_task(struct work_struct *work) 2362 { 2363 struct iavf_adapter *adapter = 2364 container_of(work, struct iavf_adapter, adminq_task); 2365 struct iavf_hw *hw = &adapter->hw; 2366 struct iavf_arq_event_info event; 2367 enum virtchnl_ops v_op; 2368 enum iavf_status ret, v_ret; 2369 u32 val, oldval; 2370 u16 pending; 2371 2372 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2373 goto out; 2374 2375 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2376 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2377 if (!event.msg_buf) 2378 goto out; 2379 2380 if (iavf_lock_timeout(&adapter->crit_lock, 200)) 2381 goto freedom; 2382 do { 2383 ret = iavf_clean_arq_element(hw, &event, &pending); 2384 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2385 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2386 2387 if (ret || !v_op) 2388 break; /* No event to process or error cleaning ARQ */ 2389 2390 
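/* cookie_high/cookie_low decoded above carry the virtchnl opcode and the
 * PF's return status for it; hand them, along with the message payload,
 * to the virtchnl completion handler.
 */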
iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2391 event.msg_len); 2392 if (pending != 0) 2393 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2394 } while (pending); 2395 mutex_unlock(&adapter->crit_lock); 2396 2397 if ((adapter->flags & 2398 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2399 adapter->state == __IAVF_RESETTING) 2400 goto freedom; 2401 2402 /* check for error indications */ 2403 val = rd32(hw, hw->aq.arq.len); 2404 if (val == 0xdeadbeef) /* indicates device in reset */ 2405 goto freedom; 2406 oldval = val; 2407 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2408 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2409 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2410 } 2411 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2412 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2413 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2414 } 2415 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2416 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2417 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2418 } 2419 if (oldval != val) 2420 wr32(hw, hw->aq.arq.len, val); 2421 2422 val = rd32(hw, hw->aq.asq.len); 2423 oldval = val; 2424 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2425 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2426 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2427 } 2428 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2429 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2430 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2431 } 2432 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2433 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2434 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2435 } 2436 if (oldval != val) 2437 wr32(hw, hw->aq.asq.len, val); 2438 2439 freedom: 2440 kfree(event.msg_buf); 2441 out: 2442 /* re-enable Admin queue interrupt cause */ 2443 iavf_misc_irq_enable(adapter); 2444 } 2445 2446 /** 2447 * iavf_client_task - worker thread to perform client work 2448 * @work: pointer to work_struct containing our data 2449 * 2450 * This task handles client interactions. Because client calls can be 2451 * reentrant, we can't handle them in the watchdog. 2452 **/ 2453 static void iavf_client_task(struct work_struct *work) 2454 { 2455 struct iavf_adapter *adapter = 2456 container_of(work, struct iavf_adapter, client_task.work); 2457 2458 /* If we can't get the client bit, just give up. We'll be rescheduled 2459 * later. 
2460 */
2461 
2462 if (!mutex_trylock(&adapter->client_lock))
2463 return;
2464 
2465 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2466 iavf_client_subtask(adapter);
2467 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2468 goto out;
2469 }
2470 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2471 iavf_notify_client_l2_params(&adapter->vsi);
2472 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2473 goto out;
2474 }
2475 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2476 iavf_notify_client_close(&adapter->vsi, false);
2477 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2478 goto out;
2479 }
2480 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2481 iavf_notify_client_open(&adapter->vsi);
2482 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2483 }
2484 out:
2485 mutex_unlock(&adapter->client_lock);
2486 }
2487 
2488 /**
2489 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2490 * @adapter: board private structure
2491 *
2492 * Free all transmit software resources
2493 **/
2494 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2495 {
2496 int i;
2497 
2498 if (!adapter->tx_rings)
2499 return;
2500 
2501 for (i = 0; i < adapter->num_active_queues; i++)
2502 if (adapter->tx_rings[i].desc)
2503 iavf_free_tx_resources(&adapter->tx_rings[i]);
2504 }
2505 
2506 /**
2507 * iavf_setup_all_tx_resources - allocate all queues Tx resources
2508 * @adapter: board private structure
2509 *
2510 * If this function returns with an error, then it's possible one or
2511 * more of the rings is populated (while the rest are not). It is the
2512 * caller's duty to clean those orphaned rings.
2513 *
2514 * Return 0 on success, negative on failure
2515 **/
2516 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2517 {
2518 int i, err = 0;
2519 
2520 for (i = 0; i < adapter->num_active_queues; i++) {
2521 adapter->tx_rings[i].count = adapter->tx_desc_count;
2522 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2523 if (!err)
2524 continue;
2525 dev_err(&adapter->pdev->dev,
2526 "Allocation for Tx Queue %u failed\n", i);
2527 break;
2528 }
2529 
2530 return err;
2531 }
2532 
2533 /**
2534 * iavf_setup_all_rx_resources - allocate all queues Rx resources
2535 * @adapter: board private structure
2536 *
2537 * If this function returns with an error, then it's possible one or
2538 * more of the rings is populated (while the rest are not). It is the
2539 * caller's duty to clean those orphaned rings.
2540 * 2541 * Return 0 on success, negative on failure 2542 **/ 2543 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2544 { 2545 int i, err = 0; 2546 2547 for (i = 0; i < adapter->num_active_queues; i++) { 2548 adapter->rx_rings[i].count = adapter->rx_desc_count; 2549 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2550 if (!err) 2551 continue; 2552 dev_err(&adapter->pdev->dev, 2553 "Allocation for Rx Queue %u failed\n", i); 2554 break; 2555 } 2556 return err; 2557 } 2558 2559 /** 2560 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2561 * @adapter: board private structure 2562 * 2563 * Free all receive software resources 2564 **/ 2565 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2566 { 2567 int i; 2568 2569 if (!adapter->rx_rings) 2570 return; 2571 2572 for (i = 0; i < adapter->num_active_queues; i++) 2573 if (adapter->rx_rings[i].desc) 2574 iavf_free_rx_resources(&adapter->rx_rings[i]); 2575 } 2576 2577 /** 2578 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2579 * @adapter: board private structure 2580 * @max_tx_rate: max Tx bw for a tc 2581 **/ 2582 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2583 u64 max_tx_rate) 2584 { 2585 int speed = 0, ret = 0; 2586 2587 if (ADV_LINK_SUPPORT(adapter)) { 2588 if (adapter->link_speed_mbps < U32_MAX) { 2589 speed = adapter->link_speed_mbps; 2590 goto validate_bw; 2591 } else { 2592 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2593 return -EINVAL; 2594 } 2595 } 2596 2597 switch (adapter->link_speed) { 2598 case VIRTCHNL_LINK_SPEED_40GB: 2599 speed = SPEED_40000; 2600 break; 2601 case VIRTCHNL_LINK_SPEED_25GB: 2602 speed = SPEED_25000; 2603 break; 2604 case VIRTCHNL_LINK_SPEED_20GB: 2605 speed = SPEED_20000; 2606 break; 2607 case VIRTCHNL_LINK_SPEED_10GB: 2608 speed = SPEED_10000; 2609 break; 2610 case VIRTCHNL_LINK_SPEED_5GB: 2611 speed = SPEED_5000; 2612 break; 2613 case VIRTCHNL_LINK_SPEED_2_5GB: 2614 speed = SPEED_2500; 2615 break; 2616 case VIRTCHNL_LINK_SPEED_1GB: 2617 speed = SPEED_1000; 2618 break; 2619 case VIRTCHNL_LINK_SPEED_100MB: 2620 speed = SPEED_100; 2621 break; 2622 default: 2623 break; 2624 } 2625 2626 validate_bw: 2627 if (max_tx_rate > speed) { 2628 dev_err(&adapter->pdev->dev, 2629 "Invalid tx rate specified\n"); 2630 ret = -EINVAL; 2631 } 2632 2633 return ret; 2634 } 2635 2636 /** 2637 * iavf_validate_ch_config - validate queue mapping info 2638 * @adapter: board private structure 2639 * @mqprio_qopt: queue parameters 2640 * 2641 * This function validates if the config provided by the user to 2642 * configure queue channels is valid or not. Returns 0 on a valid 2643 * config. 
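 *
 * For example (illustrative values only), a request with num_tc = 2,
 * count = {4, 4}, offset = {0, 4} and all min_rate entries zero is accepted,
 * while overlapping or non-contiguous offsets, a non-zero min_rate, or a
 * total max_rate above the negotiated link speed is rejected.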
2644 **/ 2645 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2646 struct tc_mqprio_qopt_offload *mqprio_qopt) 2647 { 2648 u64 total_max_rate = 0; 2649 int i, num_qps = 0; 2650 u64 tx_rate = 0; 2651 int ret = 0; 2652 2653 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2654 mqprio_qopt->qopt.num_tc < 1) 2655 return -EINVAL; 2656 2657 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2658 if (!mqprio_qopt->qopt.count[i] || 2659 mqprio_qopt->qopt.offset[i] != num_qps) 2660 return -EINVAL; 2661 if (mqprio_qopt->min_rate[i]) { 2662 dev_err(&adapter->pdev->dev, 2663 "Invalid min tx rate (greater than 0) specified\n"); 2664 return -EINVAL; 2665 } 2666 /*convert to Mbps */ 2667 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2668 IAVF_MBPS_DIVISOR); 2669 total_max_rate += tx_rate; 2670 num_qps += mqprio_qopt->qopt.count[i]; 2671 } 2672 if (num_qps > IAVF_MAX_REQ_QUEUES) 2673 return -EINVAL; 2674 2675 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2676 return ret; 2677 } 2678 2679 /** 2680 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2681 * @adapter: board private structure 2682 **/ 2683 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2684 { 2685 struct iavf_cloud_filter *cf, *cftmp; 2686 2687 spin_lock_bh(&adapter->cloud_filter_list_lock); 2688 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2689 list) { 2690 list_del(&cf->list); 2691 kfree(cf); 2692 adapter->num_cloud_filters--; 2693 } 2694 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2695 } 2696 2697 /** 2698 * __iavf_setup_tc - configure multiple traffic classes 2699 * @netdev: network interface device structure 2700 * @type_data: tc offload data 2701 * 2702 * This function processes the config information provided by the 2703 * user to configure traffic classes/queue channels and packages the 2704 * information to request the PF to setup traffic classes. 2705 * 2706 * Returns 0 on success. 
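 *
 * A typical ADq request arrives here from an mqprio qdisc in channel mode,
 * e.g. (illustrative command for a 2-TC split over 8 queues):
 *   tc qdisc add dev <iface> root mqprio num_tc 2 \
 *     map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 * which shows up below as TC_MQPRIO_MODE_CHANNEL with qopt.num_tc = 2.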
2707 **/ 2708 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2709 { 2710 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2711 struct iavf_adapter *adapter = netdev_priv(netdev); 2712 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2713 u8 num_tc = 0, total_qps = 0; 2714 int ret = 0, netdev_tc = 0; 2715 u64 max_tx_rate; 2716 u16 mode; 2717 int i; 2718 2719 num_tc = mqprio_qopt->qopt.num_tc; 2720 mode = mqprio_qopt->mode; 2721 2722 /* delete queue_channel */ 2723 if (!mqprio_qopt->qopt.hw) { 2724 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2725 /* reset the tc configuration */ 2726 netdev_reset_tc(netdev); 2727 adapter->num_tc = 0; 2728 netif_tx_stop_all_queues(netdev); 2729 netif_tx_disable(netdev); 2730 iavf_del_all_cloud_filters(adapter); 2731 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2732 goto exit; 2733 } else { 2734 return -EINVAL; 2735 } 2736 } 2737 2738 /* add queue channel */ 2739 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2740 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2741 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2742 return -EOPNOTSUPP; 2743 } 2744 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2745 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2746 return -EINVAL; 2747 } 2748 2749 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2750 if (ret) 2751 return ret; 2752 /* Return if same TC config is requested */ 2753 if (adapter->num_tc == num_tc) 2754 return 0; 2755 adapter->num_tc = num_tc; 2756 2757 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2758 if (i < num_tc) { 2759 adapter->ch_config.ch_info[i].count = 2760 mqprio_qopt->qopt.count[i]; 2761 adapter->ch_config.ch_info[i].offset = 2762 mqprio_qopt->qopt.offset[i]; 2763 total_qps += mqprio_qopt->qopt.count[i]; 2764 max_tx_rate = mqprio_qopt->max_rate[i]; 2765 /* convert to Mbps */ 2766 max_tx_rate = div_u64(max_tx_rate, 2767 IAVF_MBPS_DIVISOR); 2768 adapter->ch_config.ch_info[i].max_tx_rate = 2769 max_tx_rate; 2770 } else { 2771 adapter->ch_config.ch_info[i].count = 1; 2772 adapter->ch_config.ch_info[i].offset = 0; 2773 } 2774 } 2775 adapter->ch_config.total_qps = total_qps; 2776 netif_tx_stop_all_queues(netdev); 2777 netif_tx_disable(netdev); 2778 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2779 netdev_reset_tc(netdev); 2780 /* Report the tc mapping up the stack */ 2781 netdev_set_num_tc(adapter->netdev, num_tc); 2782 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2783 u16 qcount = mqprio_qopt->qopt.count[i]; 2784 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2785 2786 if (i < num_tc) 2787 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2788 qoffset); 2789 } 2790 } 2791 exit: 2792 return ret; 2793 } 2794 2795 /** 2796 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2797 * @adapter: board private structure 2798 * @f: pointer to struct flow_cls_offload 2799 * @filter: pointer to cloud filter structure 2800 */ 2801 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2802 struct flow_cls_offload *f, 2803 struct iavf_cloud_filter *filter) 2804 { 2805 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2806 struct flow_dissector *dissector = rule->match.dissector; 2807 u16 n_proto_mask = 0; 2808 u16 n_proto_key = 0; 2809 u8 field_flags = 0; 2810 u16 addr_type = 0; 2811 u16 n_proto = 0; 2812 int i = 0; 2813 struct virtchnl_filter *vf = &filter->f; 2814 2815 if (dissector->used_keys & 2816 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2817 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2818 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2819 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2820 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2821 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2822 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2823 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2824 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2825 dissector->used_keys); 2826 return -EOPNOTSUPP; 2827 } 2828 2829 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2830 struct flow_match_enc_keyid match; 2831 2832 flow_rule_match_enc_keyid(rule, &match); 2833 if (match.mask->keyid != 0) 2834 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2835 } 2836 2837 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2838 struct flow_match_basic match; 2839 2840 flow_rule_match_basic(rule, &match); 2841 n_proto_key = ntohs(match.key->n_proto); 2842 n_proto_mask = ntohs(match.mask->n_proto); 2843 2844 if (n_proto_key == ETH_P_ALL) { 2845 n_proto_key = 0; 2846 n_proto_mask = 0; 2847 } 2848 n_proto = n_proto_key & n_proto_mask; 2849 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2850 return -EINVAL; 2851 if (n_proto == ETH_P_IPV6) { 2852 /* specify flow type as TCP IPv6 */ 2853 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2854 } 2855 2856 if (match.key->ip_proto != IPPROTO_TCP) { 2857 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2858 return -EINVAL; 2859 } 2860 } 2861 2862 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2863 struct flow_match_eth_addrs match; 2864 2865 flow_rule_match_eth_addrs(rule, &match); 2866 2867 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2868 if (!is_zero_ether_addr(match.mask->dst)) { 2869 if (is_broadcast_ether_addr(match.mask->dst)) { 2870 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2871 } else { 2872 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2873 match.mask->dst); 2874 return IAVF_ERR_CONFIG; 2875 } 2876 } 2877 2878 if (!is_zero_ether_addr(match.mask->src)) { 2879 if (is_broadcast_ether_addr(match.mask->src)) { 2880 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2881 } else { 2882 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2883 match.mask->src); 2884 return IAVF_ERR_CONFIG; 2885 } 2886 } 2887 2888 if (!is_zero_ether_addr(match.key->dst)) 2889 if (is_valid_ether_addr(match.key->dst) || 2890 is_multicast_ether_addr(match.key->dst)) { 2891 /* set the mask if a valid dst_mac address */ 2892 for (i = 0; i < ETH_ALEN; i++) 2893 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2894 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2895 match.key->dst); 2896 } 2897 2898 if (!is_zero_ether_addr(match.key->src)) 2899 if (is_valid_ether_addr(match.key->src) || 2900 is_multicast_ether_addr(match.key->src)) { 2901 /* set the mask if a valid dst_mac address */ 2902 for (i = 0; i < ETH_ALEN; i++) 2903 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2904 ether_addr_copy(vf->data.tcp_spec.src_mac, 2905 match.key->src); 2906 } 2907 } 2908 2909 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2910 struct flow_match_vlan match; 2911 2912 flow_rule_match_vlan(rule, &match); 2913 if (match.mask->vlan_id) { 2914 if (match.mask->vlan_id == VLAN_VID_MASK) { 2915 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2916 } else { 2917 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2918 match.mask->vlan_id); 2919 return IAVF_ERR_CONFIG; 2920 } 2921 } 2922 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2923 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2924 } 2925 2926 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2927 struct flow_match_control match; 2928 2929 
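/* addr_type selects whether IPv4 or IPv6 address keys are parsed below */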
flow_rule_match_control(rule, &match);
2930 addr_type = match.key->addr_type;
2931 }
2932 
2933 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2934 struct flow_match_ipv4_addrs match;
2935 
2936 flow_rule_match_ipv4_addrs(rule, &match);
2937 if (match.mask->dst) {
2938 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2939 field_flags |= IAVF_CLOUD_FIELD_IIP;
2940 } else {
2941 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2942 be32_to_cpu(match.mask->dst));
2943 return IAVF_ERR_CONFIG;
2944 }
2945 }
2946 
2947 if (match.mask->src) {
2948 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2949 field_flags |= IAVF_CLOUD_FIELD_IIP;
2950 } else {
2951 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2952 be32_to_cpu(match.mask->src));
2953 return IAVF_ERR_CONFIG;
2954 }
2955 }
2956 
2957 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2958 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2959 return IAVF_ERR_CONFIG;
2960 }
2961 if (match.key->dst) {
2962 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2963 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2964 }
2965 if (match.key->src) {
2966 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2967 vf->data.tcp_spec.src_ip[0] = match.key->src;
2968 }
2969 }
2970 
2971 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2972 struct flow_match_ipv6_addrs match;
2973 
2974 flow_rule_match_ipv6_addrs(rule, &match);
2975 
2976 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2977 if (ipv6_addr_any(&match.mask->dst)) {
2978 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2979 IPV6_ADDR_ANY);
2980 return IAVF_ERR_CONFIG;
2981 }
2982 
2983 /* src and dest IPv6 address should not be LOOPBACK
2984 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2985 */
2986 if (ipv6_addr_loopback(&match.key->dst) ||
2987 ipv6_addr_loopback(&match.key->src)) {
2988 dev_err(&adapter->pdev->dev,
2989 "ipv6 addr should not be loopback\n");
2990 return IAVF_ERR_CONFIG;
2991 }
2992 if (!ipv6_addr_any(&match.mask->dst) ||
2993 !ipv6_addr_any(&match.mask->src))
2994 field_flags |= IAVF_CLOUD_FIELD_IIP;
2995 
2996 for (i = 0; i < 4; i++)
2997 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2998 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2999 sizeof(vf->data.tcp_spec.dst_ip));
3000 for (i = 0; i < 4; i++)
3001 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3002 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3003 sizeof(vf->data.tcp_spec.src_ip));
3004 }
3005 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3006 struct flow_match_ports match;
3007 
3008 flow_rule_match_ports(rule, &match);
3009 if (match.mask->src) {
3010 if (match.mask->src == cpu_to_be16(0xffff)) {
3011 field_flags |= IAVF_CLOUD_FIELD_IIP;
3012 } else {
3013 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3014 be16_to_cpu(match.mask->src));
3015 return IAVF_ERR_CONFIG;
3016 }
3017 }
3018 
3019 if (match.mask->dst) {
3020 if (match.mask->dst == cpu_to_be16(0xffff)) {
3021 field_flags |= IAVF_CLOUD_FIELD_IIP;
3022 } else {
3023 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3024 be16_to_cpu(match.mask->dst));
3025 return IAVF_ERR_CONFIG;
3026 }
3027 }
3028 if (match.key->dst) {
3029 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3030 vf->data.tcp_spec.dst_port = match.key->dst;
3031 }
3032 
3033 if (match.key->src) {
3034 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3035 vf->data.tcp_spec.src_port = match.key->src;
3036 }
3037 }
3038 vf->field_flags = field_flags;
3039 
3040 
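/* At this point vf->data/vf->mask describe the requested match and
 * field_flags records which fields were supplied. A rule that reaches this
 * parser would typically be installed with something like (illustrative):
 *   tc filter add dev <iface> parent ffff: protocol ip prio 1 flower \
 *     dst_ip 192.168.10.2 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 */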
return 0; 3041 } 3042 3043 /** 3044 * iavf_handle_tclass - Forward to a traffic class on the device 3045 * @adapter: board private structure 3046 * @tc: traffic class index on the device 3047 * @filter: pointer to cloud filter structure 3048 */ 3049 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3050 struct iavf_cloud_filter *filter) 3051 { 3052 if (tc == 0) 3053 return 0; 3054 if (tc < adapter->num_tc) { 3055 if (!filter->f.data.tcp_spec.dst_port) { 3056 dev_err(&adapter->pdev->dev, 3057 "Specify destination port to redirect to traffic class other than TC0\n"); 3058 return -EINVAL; 3059 } 3060 } 3061 /* redirect to a traffic class on the same device */ 3062 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3063 filter->f.action_meta = tc; 3064 return 0; 3065 } 3066 3067 /** 3068 * iavf_configure_clsflower - Add tc flower filters 3069 * @adapter: board private structure 3070 * @cls_flower: Pointer to struct flow_cls_offload 3071 */ 3072 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3073 struct flow_cls_offload *cls_flower) 3074 { 3075 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3076 struct iavf_cloud_filter *filter = NULL; 3077 int err = -EINVAL, count = 50; 3078 3079 if (tc < 0) { 3080 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3081 return -EINVAL; 3082 } 3083 3084 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3085 if (!filter) 3086 return -ENOMEM; 3087 3088 while (!mutex_trylock(&adapter->crit_lock)) { 3089 if (--count == 0) 3090 goto err; 3091 udelay(1); 3092 } 3093 3094 filter->cookie = cls_flower->cookie; 3095 3096 /* set the mask to all zeroes to begin with */ 3097 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3098 /* start out with flow type and eth type IPv4 to begin with */ 3099 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3100 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3101 if (err < 0) 3102 goto err; 3103 3104 err = iavf_handle_tclass(adapter, tc, filter); 3105 if (err < 0) 3106 goto err; 3107 3108 /* add filter to the list */ 3109 spin_lock_bh(&adapter->cloud_filter_list_lock); 3110 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3111 adapter->num_cloud_filters++; 3112 filter->add = true; 3113 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3114 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3115 err: 3116 if (err) 3117 kfree(filter); 3118 3119 mutex_unlock(&adapter->crit_lock); 3120 return err; 3121 } 3122 3123 /* iavf_find_cf - Find the cloud filter in the list 3124 * @adapter: Board private structure 3125 * @cookie: filter specific cookie 3126 * 3127 * Returns ptr to the filter object or NULL. Must be called while holding the 3128 * cloud_filter_list_lock. 
3129 */ 3130 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3131 unsigned long *cookie) 3132 { 3133 struct iavf_cloud_filter *filter = NULL; 3134 3135 if (!cookie) 3136 return NULL; 3137 3138 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3139 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3140 return filter; 3141 } 3142 return NULL; 3143 } 3144 3145 /** 3146 * iavf_delete_clsflower - Remove tc flower filters 3147 * @adapter: board private structure 3148 * @cls_flower: Pointer to struct flow_cls_offload 3149 */ 3150 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3151 struct flow_cls_offload *cls_flower) 3152 { 3153 struct iavf_cloud_filter *filter = NULL; 3154 int err = 0; 3155 3156 spin_lock_bh(&adapter->cloud_filter_list_lock); 3157 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3158 if (filter) { 3159 filter->del = true; 3160 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3161 } else { 3162 err = -EINVAL; 3163 } 3164 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3165 3166 return err; 3167 } 3168 3169 /** 3170 * iavf_setup_tc_cls_flower - flower classifier offloads 3171 * @adapter: board private structure 3172 * @cls_flower: pointer to flow_cls_offload struct with flow info 3173 */ 3174 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3175 struct flow_cls_offload *cls_flower) 3176 { 3177 switch (cls_flower->command) { 3178 case FLOW_CLS_REPLACE: 3179 return iavf_configure_clsflower(adapter, cls_flower); 3180 case FLOW_CLS_DESTROY: 3181 return iavf_delete_clsflower(adapter, cls_flower); 3182 case FLOW_CLS_STATS: 3183 return -EOPNOTSUPP; 3184 default: 3185 return -EOPNOTSUPP; 3186 } 3187 } 3188 3189 /** 3190 * iavf_setup_tc_block_cb - block callback for tc 3191 * @type: type of offload 3192 * @type_data: offload data 3193 * @cb_priv: 3194 * 3195 * This function is the block callback for traffic classes 3196 **/ 3197 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3198 void *cb_priv) 3199 { 3200 struct iavf_adapter *adapter = cb_priv; 3201 3202 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3203 return -EOPNOTSUPP; 3204 3205 switch (type) { 3206 case TC_SETUP_CLSFLOWER: 3207 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3208 default: 3209 return -EOPNOTSUPP; 3210 } 3211 } 3212 3213 static LIST_HEAD(iavf_block_cb_list); 3214 3215 /** 3216 * iavf_setup_tc - configure multiple traffic classes 3217 * @netdev: network interface device structure 3218 * @type: type of offload 3219 * @type_data: tc offload data 3220 * 3221 * This function is the callback to ndo_setup_tc in the 3222 * netdev_ops. 3223 * 3224 * Returns 0 on success 3225 **/ 3226 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3227 void *type_data) 3228 { 3229 struct iavf_adapter *adapter = netdev_priv(netdev); 3230 3231 switch (type) { 3232 case TC_SETUP_QDISC_MQPRIO: 3233 return __iavf_setup_tc(netdev, type_data); 3234 case TC_SETUP_BLOCK: 3235 return flow_block_cb_setup_simple(type_data, 3236 &iavf_block_cb_list, 3237 iavf_setup_tc_block_cb, 3238 adapter, adapter, true); 3239 default: 3240 return -EOPNOTSUPP; 3241 } 3242 } 3243 3244 /** 3245 * iavf_open - Called when a network interface is made active 3246 * @netdev: network interface device structure 3247 * 3248 * Returns 0 on success, negative value on failure 3249 * 3250 * The open entry point is called when a network interface is made 3251 * active by the system (IFF_UP). 
At this point all resources needed 3252 * for transmit and receive operations are allocated, the interrupt 3253 * handler is registered with the OS, the watchdog is started, 3254 * and the stack is notified that the interface is ready. 3255 **/ 3256 static int iavf_open(struct net_device *netdev) 3257 { 3258 struct iavf_adapter *adapter = netdev_priv(netdev); 3259 int err; 3260 3261 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3262 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3263 return -EIO; 3264 } 3265 3266 while (!mutex_trylock(&adapter->crit_lock)) 3267 usleep_range(500, 1000); 3268 3269 if (adapter->state != __IAVF_DOWN) { 3270 err = -EBUSY; 3271 goto err_unlock; 3272 } 3273 3274 /* allocate transmit descriptors */ 3275 err = iavf_setup_all_tx_resources(adapter); 3276 if (err) 3277 goto err_setup_tx; 3278 3279 /* allocate receive descriptors */ 3280 err = iavf_setup_all_rx_resources(adapter); 3281 if (err) 3282 goto err_setup_rx; 3283 3284 /* clear any pending interrupts, may auto mask */ 3285 err = iavf_request_traffic_irqs(adapter, netdev->name); 3286 if (err) 3287 goto err_req_irq; 3288 3289 spin_lock_bh(&adapter->mac_vlan_list_lock); 3290 3291 iavf_add_filter(adapter, adapter->hw.mac.addr); 3292 3293 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3294 3295 iavf_configure(adapter); 3296 3297 iavf_up_complete(adapter); 3298 3299 iavf_irq_enable(adapter, true); 3300 3301 mutex_unlock(&adapter->crit_lock); 3302 3303 return 0; 3304 3305 err_req_irq: 3306 iavf_down(adapter); 3307 iavf_free_traffic_irqs(adapter); 3308 err_setup_rx: 3309 iavf_free_all_rx_resources(adapter); 3310 err_setup_tx: 3311 iavf_free_all_tx_resources(adapter); 3312 err_unlock: 3313 mutex_unlock(&adapter->crit_lock); 3314 3315 return err; 3316 } 3317 3318 /** 3319 * iavf_close - Disables a network interface 3320 * @netdev: network interface device structure 3321 * 3322 * Returns 0, this is not allowed to fail 3323 * 3324 * The close entry point is called when an interface is de-activated 3325 * by the OS. The hardware is still under the drivers control, but 3326 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3327 * are freed, along with all transmit and receive resources. 3328 **/ 3329 static int iavf_close(struct net_device *netdev) 3330 { 3331 struct iavf_adapter *adapter = netdev_priv(netdev); 3332 int status; 3333 3334 if (adapter->state <= __IAVF_DOWN_PENDING) 3335 return 0; 3336 3337 while (!mutex_trylock(&adapter->crit_lock)) 3338 usleep_range(500, 1000); 3339 3340 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3341 if (CLIENT_ENABLED(adapter)) 3342 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3343 3344 iavf_down(adapter); 3345 iavf_change_state(adapter, __IAVF_DOWN_PENDING); 3346 iavf_free_traffic_irqs(adapter); 3347 3348 mutex_unlock(&adapter->crit_lock); 3349 3350 /* We explicitly don't free resources here because the hardware is 3351 * still active and can DMA into memory. Resources are cleared in 3352 * iavf_virtchnl_completion() after we get confirmation from the PF 3353 * driver that the rings have been stopped. 3354 * 3355 * Also, we wait for state to transition to __IAVF_DOWN before 3356 * returning. State change occurs in iavf_virtchnl_completion() after 3357 * VF resources are released (which occurs after PF driver processes and 3358 * responds to admin queue commands). 
3359 */ 3360 3361 status = wait_event_timeout(adapter->down_waitqueue, 3362 adapter->state == __IAVF_DOWN, 3363 msecs_to_jiffies(500)); 3364 if (!status) 3365 netdev_warn(netdev, "Device resources not yet released\n"); 3366 return 0; 3367 } 3368 3369 /** 3370 * iavf_change_mtu - Change the Maximum Transfer Unit 3371 * @netdev: network interface device structure 3372 * @new_mtu: new value for maximum frame size 3373 * 3374 * Returns 0 on success, negative on failure 3375 **/ 3376 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3377 { 3378 struct iavf_adapter *adapter = netdev_priv(netdev); 3379 3380 netdev->mtu = new_mtu; 3381 if (CLIENT_ENABLED(adapter)) { 3382 iavf_notify_client_l2_params(&adapter->vsi); 3383 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3384 } 3385 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3386 queue_work(iavf_wq, &adapter->reset_task); 3387 3388 return 0; 3389 } 3390 3391 /** 3392 * iavf_set_features - set the netdev feature flags 3393 * @netdev: ptr to the netdev being adjusted 3394 * @features: the feature set that the stack is suggesting 3395 * Note: expects to be called while under rtnl_lock() 3396 **/ 3397 static int iavf_set_features(struct net_device *netdev, 3398 netdev_features_t features) 3399 { 3400 struct iavf_adapter *adapter = netdev_priv(netdev); 3401 3402 /* Don't allow changing VLAN_RX flag when adapter is not capable 3403 * of VLAN offload 3404 */ 3405 if (!VLAN_ALLOWED(adapter)) { 3406 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3407 return -EINVAL; 3408 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3409 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3410 adapter->aq_required |= 3411 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3412 else 3413 adapter->aq_required |= 3414 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3415 } 3416 3417 return 0; 3418 } 3419 3420 /** 3421 * iavf_features_check - Validate encapsulated packet conforms to limits 3422 * @skb: skb buff 3423 * @dev: This physical port's netdev 3424 * @features: Offload features that the stack believes apply 3425 **/ 3426 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3427 struct net_device *dev, 3428 netdev_features_t features) 3429 { 3430 size_t len; 3431 3432 /* No point in doing any of this if neither checksum nor GSO are 3433 * being requested for this frame. We can rule out both by just 3434 * checking for CHECKSUM_PARTIAL 3435 */ 3436 if (skb->ip_summed != CHECKSUM_PARTIAL) 3437 return features; 3438 3439 /* We cannot support GSO if the MSS is going to be less than 3440 * 64 bytes. If it is then we need to drop support for GSO. 
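 *
 * The header-length checks below work the same way: if an offset cannot be
 * represented in the Tx descriptor (e.g. MACLEN is limited to 63 two-byte
 * words, i.e. a 126 byte L2 header), the checksum and GSO offload bits are
 * cleared for this skb instead of failing the transmit.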
3441 */ 3442 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3443 features &= ~NETIF_F_GSO_MASK; 3444 3445 /* MACLEN can support at most 63 words */ 3446 len = skb_network_header(skb) - skb->data; 3447 if (len & ~(63 * 2)) 3448 goto out_err; 3449 3450 /* IPLEN and EIPLEN can support at most 127 dwords */ 3451 len = skb_transport_header(skb) - skb_network_header(skb); 3452 if (len & ~(127 * 4)) 3453 goto out_err; 3454 3455 if (skb->encapsulation) { 3456 /* L4TUNLEN can support 127 words */ 3457 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3458 if (len & ~(127 * 2)) 3459 goto out_err; 3460 3461 /* IPLEN can support at most 127 dwords */ 3462 len = skb_inner_transport_header(skb) - 3463 skb_inner_network_header(skb); 3464 if (len & ~(127 * 4)) 3465 goto out_err; 3466 } 3467 3468 /* No need to validate L4LEN as TCP is the only protocol with a 3469 * a flexible value and we support all possible values supported 3470 * by TCP, which is at most 15 dwords 3471 */ 3472 3473 return features; 3474 out_err: 3475 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3476 } 3477 3478 /** 3479 * iavf_fix_features - fix up the netdev feature bits 3480 * @netdev: our net device 3481 * @features: desired feature bits 3482 * 3483 * Returns fixed-up features bits 3484 **/ 3485 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3486 netdev_features_t features) 3487 { 3488 struct iavf_adapter *adapter = netdev_priv(netdev); 3489 3490 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3491 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3492 NETIF_F_HW_VLAN_CTAG_RX | 3493 NETIF_F_HW_VLAN_CTAG_FILTER); 3494 3495 return features; 3496 } 3497 3498 static const struct net_device_ops iavf_netdev_ops = { 3499 .ndo_open = iavf_open, 3500 .ndo_stop = iavf_close, 3501 .ndo_start_xmit = iavf_xmit_frame, 3502 .ndo_set_rx_mode = iavf_set_rx_mode, 3503 .ndo_validate_addr = eth_validate_addr, 3504 .ndo_set_mac_address = iavf_set_mac, 3505 .ndo_change_mtu = iavf_change_mtu, 3506 .ndo_tx_timeout = iavf_tx_timeout, 3507 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3508 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3509 .ndo_features_check = iavf_features_check, 3510 .ndo_fix_features = iavf_fix_features, 3511 .ndo_set_features = iavf_set_features, 3512 .ndo_setup_tc = iavf_setup_tc, 3513 }; 3514 3515 /** 3516 * iavf_check_reset_complete - check that VF reset is complete 3517 * @hw: pointer to hw struct 3518 * 3519 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3520 **/ 3521 static int iavf_check_reset_complete(struct iavf_hw *hw) 3522 { 3523 u32 rstat; 3524 int i; 3525 3526 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3527 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3528 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3529 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3530 (rstat == VIRTCHNL_VFR_COMPLETED)) 3531 return 0; 3532 usleep_range(10, 20); 3533 } 3534 return -EBUSY; 3535 } 3536 3537 /** 3538 * iavf_process_config - Process the config information we got from the PF 3539 * @adapter: board private structure 3540 * 3541 * Verify that we have a valid config struct, and set up our netdev features 3542 * and our VSI struct. 
3543 **/ 3544 int iavf_process_config(struct iavf_adapter *adapter) 3545 { 3546 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3547 int i, num_req_queues = adapter->num_req_queues; 3548 struct net_device *netdev = adapter->netdev; 3549 struct iavf_vsi *vsi = &adapter->vsi; 3550 netdev_features_t hw_enc_features; 3551 netdev_features_t hw_features; 3552 3553 /* got VF config message back from PF, now we can parse it */ 3554 for (i = 0; i < vfres->num_vsis; i++) { 3555 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3556 adapter->vsi_res = &vfres->vsi_res[i]; 3557 } 3558 if (!adapter->vsi_res) { 3559 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3560 return -ENODEV; 3561 } 3562 3563 if (num_req_queues && 3564 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3565 /* Problem. The PF gave us fewer queues than what we had 3566 * negotiated in our request. Need a reset to see if we can't 3567 * get back to a working state. 3568 */ 3569 dev_err(&adapter->pdev->dev, 3570 "Requested %d queues, but PF only gave us %d.\n", 3571 num_req_queues, 3572 adapter->vsi_res->num_queue_pairs); 3573 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3574 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3575 iavf_schedule_reset(adapter); 3576 return -ENODEV; 3577 } 3578 adapter->num_req_queues = 0; 3579 3580 hw_enc_features = NETIF_F_SG | 3581 NETIF_F_IP_CSUM | 3582 NETIF_F_IPV6_CSUM | 3583 NETIF_F_HIGHDMA | 3584 NETIF_F_SOFT_FEATURES | 3585 NETIF_F_TSO | 3586 NETIF_F_TSO_ECN | 3587 NETIF_F_TSO6 | 3588 NETIF_F_SCTP_CRC | 3589 NETIF_F_RXHASH | 3590 NETIF_F_RXCSUM | 3591 0; 3592 3593 /* advertise to stack only if offloads for encapsulated packets is 3594 * supported 3595 */ 3596 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3597 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3598 NETIF_F_GSO_GRE | 3599 NETIF_F_GSO_GRE_CSUM | 3600 NETIF_F_GSO_IPXIP4 | 3601 NETIF_F_GSO_IPXIP6 | 3602 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3603 NETIF_F_GSO_PARTIAL | 3604 0; 3605 3606 if (!(vfres->vf_cap_flags & 3607 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3608 netdev->gso_partial_features |= 3609 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3610 3611 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3612 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3613 netdev->hw_enc_features |= hw_enc_features; 3614 } 3615 /* record features VLANs can make use of */ 3616 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3617 3618 /* Write features and hw_features separately to avoid polluting 3619 * with, or dropping, features that are set when we registered. 3620 */ 3621 hw_features = hw_enc_features; 3622 3623 /* Enable VLAN features if supported */ 3624 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3625 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3626 NETIF_F_HW_VLAN_CTAG_RX); 3627 /* Enable cloud filter if ADQ is supported */ 3628 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3629 hw_features |= NETIF_F_HW_TC; 3630 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3631 hw_features |= NETIF_F_GSO_UDP_L4; 3632 3633 netdev->hw_features |= hw_features; 3634 3635 netdev->features |= hw_features; 3636 3637 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3638 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3639 3640 netdev->priv_flags |= IFF_UNICAST_FLT; 3641 3642 /* Do not turn on offloads when they are requested to be turned off. 3643 * TSO needs minimum 576 bytes to work correctly. 
3644 */ 3645 if (netdev->wanted_features) { 3646 if (!(netdev->wanted_features & NETIF_F_TSO) || 3647 netdev->mtu < 576) 3648 netdev->features &= ~NETIF_F_TSO; 3649 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3650 netdev->mtu < 576) 3651 netdev->features &= ~NETIF_F_TSO6; 3652 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3653 netdev->features &= ~NETIF_F_TSO_ECN; 3654 if (!(netdev->wanted_features & NETIF_F_GRO)) 3655 netdev->features &= ~NETIF_F_GRO; 3656 if (!(netdev->wanted_features & NETIF_F_GSO)) 3657 netdev->features &= ~NETIF_F_GSO; 3658 } 3659 3660 adapter->vsi.id = adapter->vsi_res->vsi_id; 3661 3662 adapter->vsi.back = adapter; 3663 adapter->vsi.base_vector = 1; 3664 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3665 vsi->netdev = adapter->netdev; 3666 vsi->qs_handle = adapter->vsi_res->qset_handle; 3667 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3668 adapter->rss_key_size = vfres->rss_key_size; 3669 adapter->rss_lut_size = vfres->rss_lut_size; 3670 } else { 3671 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3672 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3673 } 3674 3675 return 0; 3676 } 3677 3678 /** 3679 * iavf_shutdown - Shutdown the device in preparation for a reboot 3680 * @pdev: pci device structure 3681 **/ 3682 static void iavf_shutdown(struct pci_dev *pdev) 3683 { 3684 struct net_device *netdev = pci_get_drvdata(pdev); 3685 struct iavf_adapter *adapter = netdev_priv(netdev); 3686 3687 netif_device_detach(netdev); 3688 3689 if (netif_running(netdev)) 3690 iavf_close(netdev); 3691 3692 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 3693 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); 3694 /* Prevent the watchdog from running. */ 3695 iavf_change_state(adapter, __IAVF_REMOVE); 3696 adapter->aq_required = 0; 3697 mutex_unlock(&adapter->crit_lock); 3698 3699 #ifdef CONFIG_PM 3700 pci_save_state(pdev); 3701 3702 #endif 3703 pci_disable_device(pdev); 3704 } 3705 3706 /** 3707 * iavf_probe - Device Initialization Routine 3708 * @pdev: PCI device information struct 3709 * @ent: entry in iavf_pci_tbl 3710 * 3711 * Returns 0 on success, negative on failure 3712 * 3713 * iavf_probe initializes an adapter identified by a pci_dev structure. 3714 * The OS initialization, configuring of the adapter private structure, 3715 * and a hardware reset occur. 
3716 **/ 3717 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3718 { 3719 struct net_device *netdev; 3720 struct iavf_adapter *adapter = NULL; 3721 struct iavf_hw *hw = NULL; 3722 int err; 3723 3724 err = pci_enable_device(pdev); 3725 if (err) 3726 return err; 3727 3728 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3729 if (err) { 3730 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3731 if (err) { 3732 dev_err(&pdev->dev, 3733 "DMA configuration failed: 0x%x\n", err); 3734 goto err_dma; 3735 } 3736 } 3737 3738 err = pci_request_regions(pdev, iavf_driver_name); 3739 if (err) { 3740 dev_err(&pdev->dev, 3741 "pci_request_regions failed 0x%x\n", err); 3742 goto err_pci_reg; 3743 } 3744 3745 pci_enable_pcie_error_reporting(pdev); 3746 3747 pci_set_master(pdev); 3748 3749 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3750 IAVF_MAX_REQ_QUEUES); 3751 if (!netdev) { 3752 err = -ENOMEM; 3753 goto err_alloc_etherdev; 3754 } 3755 3756 SET_NETDEV_DEV(netdev, &pdev->dev); 3757 3758 pci_set_drvdata(pdev, netdev); 3759 adapter = netdev_priv(netdev); 3760 3761 adapter->netdev = netdev; 3762 adapter->pdev = pdev; 3763 3764 hw = &adapter->hw; 3765 hw->back = adapter; 3766 3767 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3768 iavf_change_state(adapter, __IAVF_STARTUP); 3769 3770 /* Call save state here because it relies on the adapter struct. */ 3771 pci_save_state(pdev); 3772 3773 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3774 pci_resource_len(pdev, 0)); 3775 if (!hw->hw_addr) { 3776 err = -EIO; 3777 goto err_ioremap; 3778 } 3779 hw->vendor_id = pdev->vendor; 3780 hw->device_id = pdev->device; 3781 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 3782 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3783 hw->subsystem_device_id = pdev->subsystem_device; 3784 hw->bus.device = PCI_SLOT(pdev->devfn); 3785 hw->bus.func = PCI_FUNC(pdev->devfn); 3786 hw->bus.bus_id = pdev->bus->number; 3787 3788 /* set up the locks for the AQ, do this only once in probe 3789 * and destroy them only once in remove 3790 */ 3791 mutex_init(&adapter->crit_lock); 3792 mutex_init(&adapter->client_lock); 3793 mutex_init(&adapter->remove_lock); 3794 mutex_init(&hw->aq.asq_mutex); 3795 mutex_init(&hw->aq.arq_mutex); 3796 3797 spin_lock_init(&adapter->mac_vlan_list_lock); 3798 spin_lock_init(&adapter->cloud_filter_list_lock); 3799 spin_lock_init(&adapter->fdir_fltr_lock); 3800 spin_lock_init(&adapter->adv_rss_lock); 3801 3802 INIT_LIST_HEAD(&adapter->mac_filter_list); 3803 INIT_LIST_HEAD(&adapter->vlan_filter_list); 3804 INIT_LIST_HEAD(&adapter->cloud_filter_list); 3805 INIT_LIST_HEAD(&adapter->fdir_list_head); 3806 INIT_LIST_HEAD(&adapter->adv_rss_list_head); 3807 3808 INIT_WORK(&adapter->reset_task, iavf_reset_task); 3809 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 3810 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 3811 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 3812 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 3813 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 3814 3815 /* Setup the wait queue for indicating transition to down status */ 3816 init_waitqueue_head(&adapter->down_waitqueue); 3817 3818 return 0; 3819 3820 err_ioremap: 3821 free_netdev(netdev); 3822 err_alloc_etherdev: 3823 pci_disable_pcie_error_reporting(pdev); 3824 pci_release_regions(pdev); 3825 err_pci_reg: 3826 err_dma: 3827 pci_disable_device(pdev); 3828 return err; 3829 } 3830 3831 /** 3832 * iavf_suspend - 
Power management suspend routine
3833 * @dev_d: device info pointer
3834 *
3835 * Called when the system (VM) is entering sleep/suspend.
3836 **/
3837 static int __maybe_unused iavf_suspend(struct device *dev_d)
3838 {
3839 struct net_device *netdev = dev_get_drvdata(dev_d);
3840 struct iavf_adapter *adapter = netdev_priv(netdev);
3841
3842 netif_device_detach(netdev);
3843
3844 while (!mutex_trylock(&adapter->crit_lock))
3845 usleep_range(500, 1000);
3846
3847 if (netif_running(netdev)) {
3848 rtnl_lock();
3849 iavf_down(adapter);
3850 rtnl_unlock();
3851 }
3852 iavf_free_misc_irq(adapter);
3853 iavf_reset_interrupt_capability(adapter);
3854
3855 mutex_unlock(&adapter->crit_lock);
3856
3857 return 0;
3858 }
3859
3860 /**
3861 * iavf_resume - Power management resume routine
3862 * @dev_d: device info pointer
3863 *
3864 * Called when the system (VM) is resumed from sleep/suspend.
3865 **/
3866 static int __maybe_unused iavf_resume(struct device *dev_d)
3867 {
3868 struct pci_dev *pdev = to_pci_dev(dev_d);
3869 struct net_device *netdev = pci_get_drvdata(pdev);
3870 struct iavf_adapter *adapter = netdev_priv(netdev);
3871 int err;
3872
3873 pci_set_master(pdev);
3874
3875 rtnl_lock();
3876 err = iavf_set_interrupt_capability(adapter);
3877 if (err) {
3878 rtnl_unlock();
3879 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3880 return err;
3881 }
3882 err = iavf_request_misc_irq(adapter);
3883 rtnl_unlock();
3884 if (err) {
3885 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3886 return err;
3887 }
3888
3889 queue_work(iavf_wq, &adapter->reset_task);
3890
3891 netif_device_attach(netdev);
3892
3893 return err;
3894 }
3895
3896 /**
3897 * iavf_remove - Device Removal Routine
3898 * @pdev: PCI device information struct
3899 *
3900 * iavf_remove is called by the PCI subsystem to alert the driver
3901 * that it should release a PCI device. This could be caused by a
3902 * Hot-Plug event, or because the driver is going to be removed from
3903 * memory.
3904 **/
3905 static void iavf_remove(struct pci_dev *pdev)
3906 {
3907 struct net_device *netdev = pci_get_drvdata(pdev);
3908 struct iavf_adapter *adapter = netdev_priv(netdev);
3909 struct iavf_fdir_fltr *fdir, *fdirtmp;
3910 struct iavf_vlan_filter *vlf, *vlftmp;
3911 struct iavf_adv_rss *rss, *rsstmp;
3912 struct iavf_mac_filter *f, *ftmp;
3913 struct iavf_cloud_filter *cf, *cftmp;
3914 struct iavf_hw *hw = &adapter->hw;
3915 int err;
3916 /* Indicate we are in remove and not to run reset_task */
3917 mutex_lock(&adapter->remove_lock);
3918 cancel_work_sync(&adapter->reset_task);
3919 cancel_delayed_work_sync(&adapter->watchdog_task);
3920 cancel_delayed_work_sync(&adapter->client_task);
3921 if (adapter->netdev_registered) {
3922 unregister_netdev(netdev);
3923 adapter->netdev_registered = false;
3924 }
3925 if (CLIENT_ALLOWED(adapter)) {
3926 err = iavf_lan_del_device(adapter);
3927 if (err)
3928 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3929 err);
3930 }
3931
3932 iavf_request_reset(adapter);
3933 msleep(50);
3934 /* If the FW isn't responding, kick it once, but only once.
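 * i.e. if the PF never consumed the first reset request from the admin
 * send queue, post it one more time and give it another 50 ms to react.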
*/
3935 if (!iavf_asq_done(hw)) {
3936 iavf_request_reset(adapter);
3937 msleep(50);
3938 }
3939 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
3940 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
3941
3942 /* Shut down all the garbage mashers on the detention level */
3943 iavf_change_state(adapter, __IAVF_REMOVE);
3944 adapter->aq_required = 0;
3945 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3946 iavf_free_all_tx_resources(adapter);
3947 iavf_free_all_rx_resources(adapter);
3948 iavf_misc_irq_disable(adapter);
3949 iavf_free_misc_irq(adapter);
3950 iavf_reset_interrupt_capability(adapter);
3951 iavf_free_q_vectors(adapter);
3952
3953 cancel_delayed_work_sync(&adapter->watchdog_task);
3954
3955 cancel_work_sync(&adapter->adminq_task);
3956
3957 iavf_free_rss(adapter);
3958
3959 if (hw->aq.asq.count)
3960 iavf_shutdown_adminq(hw);
3961
3962 /* destroy the locks only once, here */
3963 mutex_destroy(&hw->aq.arq_mutex);
3964 mutex_destroy(&hw->aq.asq_mutex);
3965 mutex_destroy(&adapter->client_lock);
3966 mutex_unlock(&adapter->crit_lock);
3967 mutex_destroy(&adapter->crit_lock);
3968 mutex_unlock(&adapter->remove_lock);
3969 mutex_destroy(&adapter->remove_lock);
3970
3971 iounmap(hw->hw_addr);
3972 pci_release_regions(pdev);
3973 iavf_free_queues(adapter);
3974 kfree(adapter->vf_res);
3975 spin_lock_bh(&adapter->mac_vlan_list_lock);
3976 /* If we got removed before an up/down sequence, we've got a filter
3977 * hanging out there that we need to get rid of.
3978 */
3979 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3980 list_del(&f->list);
3981 kfree(f);
3982 }
3983 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3984 list) {
3985 list_del(&vlf->list);
3986 kfree(vlf);
3987 }
3988
3989 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3990
3991 spin_lock_bh(&adapter->cloud_filter_list_lock);
3992 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3993 list_del(&cf->list);
3994 kfree(cf);
3995 }
3996 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3997
3998 spin_lock_bh(&adapter->fdir_fltr_lock);
3999 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4000 list_del(&fdir->list);
4001 kfree(fdir);
4002 }
4003 spin_unlock_bh(&adapter->fdir_fltr_lock);
4004
4005 spin_lock_bh(&adapter->adv_rss_lock);
4006 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4007 list) {
4008 list_del(&rss->list);
4009 kfree(rss);
4010 }
4011 spin_unlock_bh(&adapter->adv_rss_lock);
4012
4013 free_netdev(netdev);
4014
4015 pci_disable_pcie_error_reporting(pdev);
4016
4017 pci_disable_device(pdev);
4018 }
4019
4020 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4021
4022 static struct pci_driver iavf_driver = {
4023 .name = iavf_driver_name,
4024 .id_table = iavf_pci_tbl,
4025 .probe = iavf_probe,
4026 .remove = iavf_remove,
4027 .driver.pm = &iavf_pm_ops,
4028 .shutdown = iavf_shutdown,
4029 };
4030
4031 /**
4032 * iavf_init_module - Driver Registration Routine
4033 *
4034 * iavf_init_module is the first routine called when the driver is
4035 * loaded. It creates the driver workqueue and registers with the PCI subsystem.
4036 **/ 4037 static int __init iavf_init_module(void) 4038 { 4039 int ret; 4040 4041 pr_info("iavf: %s\n", iavf_driver_string); 4042 4043 pr_info("%s\n", iavf_copyright); 4044 4045 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, 4046 iavf_driver_name); 4047 if (!iavf_wq) { 4048 pr_err("%s: Failed to create workqueue\n", iavf_driver_name); 4049 return -ENOMEM; 4050 } 4051 ret = pci_register_driver(&iavf_driver); 4052 return ret; 4053 } 4054 4055 module_init(iavf_init_module); 4056 4057 /** 4058 * iavf_exit_module - Driver Exit Cleanup Routine 4059 * 4060 * iavf_exit_module is called just before the driver is removed 4061 * from memory. 4062 **/ 4063 static void __exit iavf_exit_module(void) 4064 { 4065 pci_unregister_driver(&iavf_driver); 4066 destroy_workqueue(iavf_wq); 4067 } 4068 4069 module_exit(iavf_exit_module); 4070 4071 /* iavf_main.c */ 4072