// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
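
/* The *_d routines above are the OS-glue allocation callbacks used by the
 * shared admin-queue code: thin wrappers around dma_alloc_coherent()/
 * dma_free_coherent() and kzalloc()/kfree(). A minimal usage sketch,
 * assuming hw->back has already been set to point at the adapter (as the
 * casts in these helpers require):
 *
 *	struct iavf_dma_mem mem = {};
 *
 *	if (!iavf_allocate_dma_mem_d(hw, &mem, 4096, 4096))
 *		iavf_free_dma_mem_d(hw, &mem);
 */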

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
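
/* MSI-X vector layout implied by the helpers above: entry 0 is the "misc"
 * vector (admin queue / mailbox, controlled through IAVF_VFINT_DYN_CTL01),
 * while entries 1..num_msix_vectors-1 drive the traffic queues through
 * IAVF_VFINT_DYN_CTLN1(i - 1). That is why the queue loops start at i = 1
 * and why bit (i - 1) of the @mask argument to iavf_irq_enable_queues()
 * selects vector i.
 */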

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
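
/* The two mapping helpers above chain a ring onto its vector's singly
 * linked list (ring->next takes the old head before q_vector->rx.ring /
 * tx.ring is updated), so one q_vector can service several rings when
 * there are fewer vectors than queue pairs. They also seed the dynamic
 * ITR state for the vector; the >> 1 when programming IAVF_VFINT_ITRN1
 * converts the stored ITR value to register units (2 usec per tick on
 * this hardware family).
 */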

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
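
/* Worked example of the round-robin above: with 8 active queue pairs and
 * 4 queue vectors (num_msix_vectors - NONQ_VECS), rings 0 and 4 land on
 * vector 0, rings 1 and 5 on vector 1, rings 2 and 6 on vector 2, and
 * rings 3 and 7 on vector 3. When there are at least as many vectors as
 * queues, each ring pair gets its own vector and the wrap never triggers.
 */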

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size.
*/ 566 #if (PAGE_SIZE < 8192) 567 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 568 struct net_device *netdev = adapter->netdev; 569 570 /* For jumbo frames on systems with 4K pages we have to use 571 * an order 1 page, so we might as well increase the size 572 * of our Rx buffer to make better use of the available space 573 */ 574 rx_buf_len = IAVF_RXBUFFER_3072; 575 576 /* We use a 1536 buffer size for configurations with 577 * standard Ethernet mtu. On x86 this gives us enough room 578 * for shared info and 192 bytes of padding. 579 */ 580 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 581 (netdev->mtu <= ETH_DATA_LEN)) 582 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 583 } 584 #endif 585 586 for (i = 0; i < adapter->num_active_queues; i++) { 587 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 588 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 589 590 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 591 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 592 else 593 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 594 } 595 } 596 597 /** 598 * iavf_find_vlan - Search filter list for specific vlan filter 599 * @adapter: board private structure 600 * @vlan: vlan tag 601 * 602 * Returns ptr to the filter object or NULL. Must be called while holding the 603 * mac_vlan_list_lock. 604 **/ 605 static struct 606 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 607 { 608 struct iavf_vlan_filter *f; 609 610 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 611 if (vlan == f->vlan) 612 return f; 613 } 614 return NULL; 615 } 616 617 /** 618 * iavf_add_vlan - Add a vlan filter to the list 619 * @adapter: board private structure 620 * @vlan: VLAN tag 621 * 622 * Returns ptr to the filter object or NULL when no memory available. 
623 **/ 624 static struct 625 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 626 { 627 struct iavf_vlan_filter *f = NULL; 628 629 spin_lock_bh(&adapter->mac_vlan_list_lock); 630 631 f = iavf_find_vlan(adapter, vlan); 632 if (!f) { 633 f = kzalloc(sizeof(*f), GFP_ATOMIC); 634 if (!f) 635 goto clearout; 636 637 f->vlan = vlan; 638 639 list_add_tail(&f->list, &adapter->vlan_filter_list); 640 f->add = true; 641 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 642 } 643 644 clearout: 645 spin_unlock_bh(&adapter->mac_vlan_list_lock); 646 return f; 647 } 648 649 /** 650 * iavf_del_vlan - Remove a vlan filter from the list 651 * @adapter: board private structure 652 * @vlan: VLAN tag 653 **/ 654 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 655 { 656 struct iavf_vlan_filter *f; 657 658 spin_lock_bh(&adapter->mac_vlan_list_lock); 659 660 f = iavf_find_vlan(adapter, vlan); 661 if (f) { 662 f->remove = true; 663 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 664 } 665 666 spin_unlock_bh(&adapter->mac_vlan_list_lock); 667 } 668 669 /** 670 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 671 * @netdev: network device struct 672 * @proto: unused protocol data 673 * @vid: VLAN tag 674 **/ 675 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 676 __always_unused __be16 proto, u16 vid) 677 { 678 struct iavf_adapter *adapter = netdev_priv(netdev); 679 680 if (!VLAN_ALLOWED(adapter)) 681 return -EIO; 682 if (iavf_add_vlan(adapter, vid) == NULL) 683 return -ENOMEM; 684 return 0; 685 } 686 687 /** 688 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 689 * @netdev: network device struct 690 * @proto: unused protocol data 691 * @vid: VLAN tag 692 **/ 693 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 694 __always_unused __be16 proto, u16 vid) 695 { 696 struct iavf_adapter *adapter = netdev_priv(netdev); 697 698 if (VLAN_ALLOWED(adapter)) { 699 iavf_del_vlan(adapter, vid); 700 return 0; 701 } 702 return -EIO; 703 } 704 705 /** 706 * iavf_find_filter - Search filter list for specific mac filter 707 * @adapter: board private structure 708 * @macaddr: the MAC address 709 * 710 * Returns ptr to the filter object or NULL. Must be called while holding the 711 * mac_vlan_list_lock. 712 **/ 713 static struct 714 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 715 const u8 *macaddr) 716 { 717 struct iavf_mac_filter *f; 718 719 if (!macaddr) 720 return NULL; 721 722 list_for_each_entry(f, &adapter->mac_filter_list, list) { 723 if (ether_addr_equal(macaddr, f->macaddr)) 724 return f; 725 } 726 return NULL; 727 } 728 729 /** 730 * iavf_add_filter - Add a mac filter to the filter list 731 * @adapter: board private structure 732 * @macaddr: the MAC address 733 * 734 * Returns ptr to the filter object or NULL when no memory available. 
735 **/ 736 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 737 const u8 *macaddr) 738 { 739 struct iavf_mac_filter *f; 740 741 if (!macaddr) 742 return NULL; 743 744 f = iavf_find_filter(adapter, macaddr); 745 if (!f) { 746 f = kzalloc(sizeof(*f), GFP_ATOMIC); 747 if (!f) 748 return f; 749 750 ether_addr_copy(f->macaddr, macaddr); 751 752 list_add_tail(&f->list, &adapter->mac_filter_list); 753 f->add = true; 754 f->is_new_mac = true; 755 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 756 } else { 757 f->remove = false; 758 } 759 760 return f; 761 } 762 763 /** 764 * iavf_set_mac - NDO callback to set port mac address 765 * @netdev: network interface device structure 766 * @p: pointer to an address structure 767 * 768 * Returns 0 on success, negative on failure 769 **/ 770 static int iavf_set_mac(struct net_device *netdev, void *p) 771 { 772 struct iavf_adapter *adapter = netdev_priv(netdev); 773 struct iavf_hw *hw = &adapter->hw; 774 struct iavf_mac_filter *f; 775 struct sockaddr *addr = p; 776 777 if (!is_valid_ether_addr(addr->sa_data)) 778 return -EADDRNOTAVAIL; 779 780 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 781 return 0; 782 783 spin_lock_bh(&adapter->mac_vlan_list_lock); 784 785 f = iavf_find_filter(adapter, hw->mac.addr); 786 if (f) { 787 f->remove = true; 788 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 789 } 790 791 f = iavf_add_filter(adapter, addr->sa_data); 792 793 spin_unlock_bh(&adapter->mac_vlan_list_lock); 794 795 if (f) { 796 ether_addr_copy(hw->mac.addr, addr->sa_data); 797 } 798 799 return (f == NULL) ? -ENOMEM : 0; 800 } 801 802 /** 803 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 804 * @netdev: the netdevice 805 * @addr: address to add 806 * 807 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 808 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 809 */ 810 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 811 { 812 struct iavf_adapter *adapter = netdev_priv(netdev); 813 814 if (iavf_add_filter(adapter, addr)) 815 return 0; 816 else 817 return -ENOMEM; 818 } 819 820 /** 821 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 822 * @netdev: the netdevice 823 * @addr: address to add 824 * 825 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 826 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 827 */ 828 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 829 { 830 struct iavf_adapter *adapter = netdev_priv(netdev); 831 struct iavf_mac_filter *f; 832 833 /* Under some circumstances, we might receive a request to delete 834 * our own device address from our uc list. Because we store the 835 * device address in the VSI's MAC/VLAN filter list, we need to ignore 836 * such requests and not delete our device address from this list. 
837 */ 838 if (ether_addr_equal(addr, netdev->dev_addr)) 839 return 0; 840 841 f = iavf_find_filter(adapter, addr); 842 if (f) { 843 f->remove = true; 844 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 845 } 846 return 0; 847 } 848 849 /** 850 * iavf_set_rx_mode - NDO callback to set the netdev filters 851 * @netdev: network interface device structure 852 **/ 853 static void iavf_set_rx_mode(struct net_device *netdev) 854 { 855 struct iavf_adapter *adapter = netdev_priv(netdev); 856 857 spin_lock_bh(&adapter->mac_vlan_list_lock); 858 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 859 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 860 spin_unlock_bh(&adapter->mac_vlan_list_lock); 861 862 if (netdev->flags & IFF_PROMISC && 863 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 864 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 865 else if (!(netdev->flags & IFF_PROMISC) && 866 adapter->flags & IAVF_FLAG_PROMISC_ON) 867 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 868 869 if (netdev->flags & IFF_ALLMULTI && 870 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 871 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 872 else if (!(netdev->flags & IFF_ALLMULTI) && 873 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 874 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 875 } 876 877 /** 878 * iavf_napi_enable_all - enable NAPI on all queue vectors 879 * @adapter: board private structure 880 **/ 881 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 882 { 883 int q_idx; 884 struct iavf_q_vector *q_vector; 885 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 886 887 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 888 struct napi_struct *napi; 889 890 q_vector = &adapter->q_vectors[q_idx]; 891 napi = &q_vector->napi; 892 napi_enable(napi); 893 } 894 } 895 896 /** 897 * iavf_napi_disable_all - disable NAPI on all queue vectors 898 * @adapter: board private structure 899 **/ 900 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 901 { 902 int q_idx; 903 struct iavf_q_vector *q_vector; 904 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 905 906 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 907 q_vector = &adapter->q_vectors[q_idx]; 908 napi_disable(&q_vector->napi); 909 } 910 } 911 912 /** 913 * iavf_configure - set up transmit and receive data structures 914 * @adapter: board private structure 915 **/ 916 static void iavf_configure(struct iavf_adapter *adapter) 917 { 918 struct net_device *netdev = adapter->netdev; 919 int i; 920 921 iavf_set_rx_mode(netdev); 922 923 iavf_configure_tx(adapter); 924 iavf_configure_rx(adapter); 925 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 926 927 for (i = 0; i < adapter->num_active_queues; i++) { 928 struct iavf_ring *ring = &adapter->rx_rings[i]; 929 930 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 931 } 932 } 933 934 /** 935 * iavf_up_complete - Finish the last steps of bringing up a connection 936 * @adapter: board private structure 937 * 938 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
939 **/ 940 static void iavf_up_complete(struct iavf_adapter *adapter) 941 { 942 adapter->state = __IAVF_RUNNING; 943 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 944 945 iavf_napi_enable_all(adapter); 946 947 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 948 if (CLIENT_ENABLED(adapter)) 949 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 950 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 951 } 952 953 /** 954 * iavf_down - Shutdown the connection processing 955 * @adapter: board private structure 956 * 957 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 958 **/ 959 void iavf_down(struct iavf_adapter *adapter) 960 { 961 struct net_device *netdev = adapter->netdev; 962 struct iavf_vlan_filter *vlf; 963 struct iavf_cloud_filter *cf; 964 struct iavf_fdir_fltr *fdir; 965 struct iavf_mac_filter *f; 966 struct iavf_adv_rss *rss; 967 968 if (adapter->state <= __IAVF_DOWN_PENDING) 969 return; 970 971 netif_carrier_off(netdev); 972 netif_tx_disable(netdev); 973 adapter->link_up = false; 974 iavf_napi_disable_all(adapter); 975 iavf_irq_disable(adapter); 976 977 spin_lock_bh(&adapter->mac_vlan_list_lock); 978 979 /* clear the sync flag on all filters */ 980 __dev_uc_unsync(adapter->netdev, NULL); 981 __dev_mc_unsync(adapter->netdev, NULL); 982 983 /* remove all MAC filters */ 984 list_for_each_entry(f, &adapter->mac_filter_list, list) { 985 f->remove = true; 986 } 987 988 /* remove all VLAN filters */ 989 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 990 vlf->remove = true; 991 } 992 993 spin_unlock_bh(&adapter->mac_vlan_list_lock); 994 995 /* remove all cloud filters */ 996 spin_lock_bh(&adapter->cloud_filter_list_lock); 997 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 998 cf->del = true; 999 } 1000 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1001 1002 /* remove all Flow Director filters */ 1003 spin_lock_bh(&adapter->fdir_fltr_lock); 1004 list_for_each_entry(fdir, &adapter->fdir_list_head, list) { 1005 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; 1006 } 1007 spin_unlock_bh(&adapter->fdir_fltr_lock); 1008 1009 /* remove all advance RSS configuration */ 1010 spin_lock_bh(&adapter->adv_rss_lock); 1011 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) 1012 rss->state = IAVF_ADV_RSS_DEL_REQUEST; 1013 spin_unlock_bh(&adapter->adv_rss_lock); 1014 1015 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1016 adapter->state != __IAVF_RESETTING) { 1017 /* cancel any current operation */ 1018 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1019 /* Schedule operations to close down the HW. Don't wait 1020 * here for this to complete. The watchdog is still running 1021 * and it will take care of this. 1022 */ 1023 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1024 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1025 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1026 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; 1027 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; 1028 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1029 } 1030 1031 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1032 } 1033 1034 /** 1035 * iavf_acquire_msix_vectors - Setup the MSIX capability 1036 * @adapter: board private structure 1037 * @vectors: number of vectors to request 1038 * 1039 * Work with the OS to set up the MSIX vectors needed. 
1040 * 1041 * Returns 0 on success, negative on failure 1042 **/ 1043 static int 1044 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1045 { 1046 int err, vector_threshold; 1047 1048 /* We'll want at least 3 (vector_threshold): 1049 * 0) Other (Admin Queue and link, mostly) 1050 * 1) TxQ[0] Cleanup 1051 * 2) RxQ[0] Cleanup 1052 */ 1053 vector_threshold = MIN_MSIX_COUNT; 1054 1055 /* The more we get, the more we will assign to Tx/Rx Cleanup 1056 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1057 * Right now, we simply care about how many we'll get; we'll 1058 * set them up later while requesting irq's. 1059 */ 1060 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1061 vector_threshold, vectors); 1062 if (err < 0) { 1063 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1064 kfree(adapter->msix_entries); 1065 adapter->msix_entries = NULL; 1066 return err; 1067 } 1068 1069 /* Adjust for only the vectors we'll use, which is minimum 1070 * of max_msix_q_vectors + NONQ_VECS, or the number of 1071 * vectors we were allocated. 1072 */ 1073 adapter->num_msix_vectors = err; 1074 return 0; 1075 } 1076 1077 /** 1078 * iavf_free_queues - Free memory for all rings 1079 * @adapter: board private structure to initialize 1080 * 1081 * Free all of the memory associated with queue pairs. 1082 **/ 1083 static void iavf_free_queues(struct iavf_adapter *adapter) 1084 { 1085 if (!adapter->vsi_res) 1086 return; 1087 adapter->num_active_queues = 0; 1088 kfree(adapter->tx_rings); 1089 adapter->tx_rings = NULL; 1090 kfree(adapter->rx_rings); 1091 adapter->rx_rings = NULL; 1092 } 1093 1094 /** 1095 * iavf_alloc_queues - Allocate memory for all rings 1096 * @adapter: board private structure to initialize 1097 * 1098 * We allocate one ring per queue at run-time since we don't know the 1099 * number of queues at compile-time. The polling_netdev array is 1100 * intended for Multiqueue, but should work fine with a single queue. 1101 **/ 1102 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1103 { 1104 int i, num_active_queues; 1105 1106 /* If we're in reset reallocating queues we don't actually know yet for 1107 * certain the PF gave us the number of queues we asked for but we'll 1108 * assume it did. Once basic reset is finished we'll confirm once we 1109 * start negotiating config with PF. 
1110 */ 1111 if (adapter->num_req_queues) 1112 num_active_queues = adapter->num_req_queues; 1113 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1114 adapter->num_tc) 1115 num_active_queues = adapter->ch_config.total_qps; 1116 else 1117 num_active_queues = min_t(int, 1118 adapter->vsi_res->num_queue_pairs, 1119 (int)(num_online_cpus())); 1120 1121 1122 adapter->tx_rings = kcalloc(num_active_queues, 1123 sizeof(struct iavf_ring), GFP_KERNEL); 1124 if (!adapter->tx_rings) 1125 goto err_out; 1126 adapter->rx_rings = kcalloc(num_active_queues, 1127 sizeof(struct iavf_ring), GFP_KERNEL); 1128 if (!adapter->rx_rings) 1129 goto err_out; 1130 1131 for (i = 0; i < num_active_queues; i++) { 1132 struct iavf_ring *tx_ring; 1133 struct iavf_ring *rx_ring; 1134 1135 tx_ring = &adapter->tx_rings[i]; 1136 1137 tx_ring->queue_index = i; 1138 tx_ring->netdev = adapter->netdev; 1139 tx_ring->dev = &adapter->pdev->dev; 1140 tx_ring->count = adapter->tx_desc_count; 1141 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1142 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1143 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1144 1145 rx_ring = &adapter->rx_rings[i]; 1146 rx_ring->queue_index = i; 1147 rx_ring->netdev = adapter->netdev; 1148 rx_ring->dev = &adapter->pdev->dev; 1149 rx_ring->count = adapter->rx_desc_count; 1150 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1151 } 1152 1153 adapter->num_active_queues = num_active_queues; 1154 1155 return 0; 1156 1157 err_out: 1158 iavf_free_queues(adapter); 1159 return -ENOMEM; 1160 } 1161 1162 /** 1163 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1164 * @adapter: board private structure to initialize 1165 * 1166 * Attempt to configure the interrupts using the best available 1167 * capabilities of the hardware and the kernel. 1168 **/ 1169 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1170 { 1171 int vector, v_budget; 1172 int pairs = 0; 1173 int err = 0; 1174 1175 if (!adapter->vsi_res) { 1176 err = -EIO; 1177 goto out; 1178 } 1179 pairs = adapter->num_active_queues; 1180 1181 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1182 * us much good if we have more vectors than CPUs. However, we already 1183 * limit the total number of queues by the number of CPUs so we do not 1184 * need any further limiting here. 
1185 */ 1186 v_budget = min_t(int, pairs + NONQ_VECS, 1187 (int)adapter->vf_res->max_vectors); 1188 1189 adapter->msix_entries = kcalloc(v_budget, 1190 sizeof(struct msix_entry), GFP_KERNEL); 1191 if (!adapter->msix_entries) { 1192 err = -ENOMEM; 1193 goto out; 1194 } 1195 1196 for (vector = 0; vector < v_budget; vector++) 1197 adapter->msix_entries[vector].entry = vector; 1198 1199 err = iavf_acquire_msix_vectors(adapter, v_budget); 1200 1201 out: 1202 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1203 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1204 return err; 1205 } 1206 1207 /** 1208 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1209 * @adapter: board private structure 1210 * 1211 * Return 0 on success, negative on failure 1212 **/ 1213 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1214 { 1215 struct iavf_aqc_get_set_rss_key_data *rss_key = 1216 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1217 struct iavf_hw *hw = &adapter->hw; 1218 int ret = 0; 1219 1220 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1221 /* bail because we already have a command pending */ 1222 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1223 adapter->current_op); 1224 return -EBUSY; 1225 } 1226 1227 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1228 if (ret) { 1229 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1230 iavf_stat_str(hw, ret), 1231 iavf_aq_str(hw, hw->aq.asq_last_status)); 1232 return ret; 1233 1234 } 1235 1236 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1237 adapter->rss_lut, adapter->rss_lut_size); 1238 if (ret) { 1239 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1240 iavf_stat_str(hw, ret), 1241 iavf_aq_str(hw, hw->aq.asq_last_status)); 1242 } 1243 1244 return ret; 1245 1246 } 1247 1248 /** 1249 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1250 * @adapter: board private structure 1251 * 1252 * Returns 0 on success, negative on failure 1253 **/ 1254 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1255 { 1256 struct iavf_hw *hw = &adapter->hw; 1257 u32 *dw; 1258 u16 i; 1259 1260 dw = (u32 *)adapter->rss_key; 1261 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1262 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1263 1264 dw = (u32 *)adapter->rss_lut; 1265 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1266 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1267 1268 iavf_flush(hw); 1269 1270 return 0; 1271 } 1272 1273 /** 1274 * iavf_config_rss - Configure RSS keys and lut 1275 * @adapter: board private structure 1276 * 1277 * Returns 0 on success, negative on failure 1278 **/ 1279 int iavf_config_rss(struct iavf_adapter *adapter) 1280 { 1281 1282 if (RSS_PF(adapter)) { 1283 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1284 IAVF_FLAG_AQ_SET_RSS_KEY; 1285 return 0; 1286 } else if (RSS_AQ(adapter)) { 1287 return iavf_config_rss_aq(adapter); 1288 } else { 1289 return iavf_config_rss_reg(adapter); 1290 } 1291 } 1292 1293 /** 1294 * iavf_fill_rss_lut - Fill the lut with default values 1295 * @adapter: board private structure 1296 **/ 1297 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1298 { 1299 u16 i; 1300 1301 for (i = 0; i < adapter->rss_lut_size; i++) 1302 adapter->rss_lut[i] = i % adapter->num_active_queues; 1303 } 1304 1305 /** 1306 * iavf_init_rss - Prepare for RSS 1307 * @adapter: board private structure 1308 * 1309 * Return 0 on success, negative on failure 1310 **/ 1311 static int 
iavf_init_rss(struct iavf_adapter *adapter) 1312 { 1313 struct iavf_hw *hw = &adapter->hw; 1314 int ret; 1315 1316 if (!RSS_PF(adapter)) { 1317 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1318 if (adapter->vf_res->vf_cap_flags & 1319 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1320 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1321 else 1322 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1323 1324 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1325 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1326 } 1327 1328 iavf_fill_rss_lut(adapter); 1329 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1330 ret = iavf_config_rss(adapter); 1331 1332 return ret; 1333 } 1334 1335 /** 1336 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1337 * @adapter: board private structure to initialize 1338 * 1339 * We allocate one q_vector per queue interrupt. If allocation fails we 1340 * return -ENOMEM. 1341 **/ 1342 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1343 { 1344 int q_idx = 0, num_q_vectors; 1345 struct iavf_q_vector *q_vector; 1346 1347 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1348 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1349 GFP_KERNEL); 1350 if (!adapter->q_vectors) 1351 return -ENOMEM; 1352 1353 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1354 q_vector = &adapter->q_vectors[q_idx]; 1355 q_vector->adapter = adapter; 1356 q_vector->vsi = &adapter->vsi; 1357 q_vector->v_idx = q_idx; 1358 q_vector->reg_idx = q_idx; 1359 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1360 netif_napi_add(adapter->netdev, &q_vector->napi, 1361 iavf_napi_poll, NAPI_POLL_WEIGHT); 1362 } 1363 1364 return 0; 1365 } 1366 1367 /** 1368 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1369 * @adapter: board private structure to initialize 1370 * 1371 * This function frees the memory allocated to the q_vectors. In addition if 1372 * NAPI is enabled it will delete any references to the NAPI struct prior 1373 * to freeing the q_vector. 
1374 **/ 1375 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1376 { 1377 int q_idx, num_q_vectors; 1378 int napi_vectors; 1379 1380 if (!adapter->q_vectors) 1381 return; 1382 1383 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1384 napi_vectors = adapter->num_active_queues; 1385 1386 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1387 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1388 1389 if (q_idx < napi_vectors) 1390 netif_napi_del(&q_vector->napi); 1391 } 1392 kfree(adapter->q_vectors); 1393 adapter->q_vectors = NULL; 1394 } 1395 1396 /** 1397 * iavf_reset_interrupt_capability - Reset MSIX setup 1398 * @adapter: board private structure 1399 * 1400 **/ 1401 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1402 { 1403 if (!adapter->msix_entries) 1404 return; 1405 1406 pci_disable_msix(adapter->pdev); 1407 kfree(adapter->msix_entries); 1408 adapter->msix_entries = NULL; 1409 } 1410 1411 /** 1412 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1413 * @adapter: board private structure to initialize 1414 * 1415 **/ 1416 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1417 { 1418 int err; 1419 1420 err = iavf_alloc_queues(adapter); 1421 if (err) { 1422 dev_err(&adapter->pdev->dev, 1423 "Unable to allocate memory for queues\n"); 1424 goto err_alloc_queues; 1425 } 1426 1427 rtnl_lock(); 1428 err = iavf_set_interrupt_capability(adapter); 1429 rtnl_unlock(); 1430 if (err) { 1431 dev_err(&adapter->pdev->dev, 1432 "Unable to setup interrupt capabilities\n"); 1433 goto err_set_interrupt; 1434 } 1435 1436 err = iavf_alloc_q_vectors(adapter); 1437 if (err) { 1438 dev_err(&adapter->pdev->dev, 1439 "Unable to allocate memory for queue vectors\n"); 1440 goto err_alloc_q_vectors; 1441 } 1442 1443 /* If we've made it so far while ADq flag being ON, then we haven't 1444 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1445 * resources have been allocated in the reset path. 1446 * Now we can truly claim that ADq is enabled. 1447 */ 1448 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1449 adapter->num_tc) 1450 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1451 adapter->num_tc); 1452 1453 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1454 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1455 adapter->num_active_queues); 1456 1457 return 0; 1458 err_alloc_q_vectors: 1459 iavf_reset_interrupt_capability(adapter); 1460 err_set_interrupt: 1461 iavf_free_queues(adapter); 1462 err_alloc_queues: 1463 return err; 1464 } 1465 1466 /** 1467 * iavf_free_rss - Free memory used by RSS structs 1468 * @adapter: board private structure 1469 **/ 1470 static void iavf_free_rss(struct iavf_adapter *adapter) 1471 { 1472 kfree(adapter->rss_key); 1473 adapter->rss_key = NULL; 1474 1475 kfree(adapter->rss_lut); 1476 adapter->rss_lut = NULL; 1477 } 1478 1479 /** 1480 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1481 * @adapter: board private structure 1482 * 1483 * Returns 0 on success, negative on failure 1484 **/ 1485 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1486 { 1487 struct net_device *netdev = adapter->netdev; 1488 int err; 1489 1490 if (netif_running(netdev)) 1491 iavf_free_traffic_irqs(adapter); 1492 iavf_free_misc_irq(adapter); 1493 iavf_reset_interrupt_capability(adapter); 1494 iavf_free_q_vectors(adapter); 1495 iavf_free_queues(adapter); 1496 1497 err = iavf_init_interrupt_scheme(adapter); 1498 if (err) 1499 goto err; 1500 1501 netif_tx_stop_all_queues(netdev); 1502 1503 err = iavf_request_misc_irq(adapter); 1504 if (err) 1505 goto err; 1506 1507 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1508 1509 iavf_map_rings_to_vectors(adapter); 1510 err: 1511 return err; 1512 } 1513 1514 /** 1515 * iavf_process_aq_command - process aq_required flags 1516 * and sends aq command 1517 * @adapter: pointer to iavf adapter structure 1518 * 1519 * Returns 0 on success 1520 * Returns error code if no command was sent 1521 * or error code if the command failed. 1522 **/ 1523 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1524 { 1525 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1526 return iavf_send_vf_config_msg(adapter); 1527 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1528 iavf_disable_queues(adapter); 1529 return 0; 1530 } 1531 1532 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1533 iavf_map_queues(adapter); 1534 return 0; 1535 } 1536 1537 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1538 iavf_add_ether_addrs(adapter); 1539 return 0; 1540 } 1541 1542 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1543 iavf_add_vlans(adapter); 1544 return 0; 1545 } 1546 1547 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1548 iavf_del_ether_addrs(adapter); 1549 return 0; 1550 } 1551 1552 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1553 iavf_del_vlans(adapter); 1554 return 0; 1555 } 1556 1557 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1558 iavf_enable_vlan_stripping(adapter); 1559 return 0; 1560 } 1561 1562 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1563 iavf_disable_vlan_stripping(adapter); 1564 return 0; 1565 } 1566 1567 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1568 iavf_configure_queues(adapter); 1569 return 0; 1570 } 1571 1572 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1573 iavf_enable_queues(adapter); 1574 return 0; 1575 } 1576 1577 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1578 /* This message goes straight to the firmware, not the 1579 * PF, so we don't have to set current_op as we will 1580 * not get a response through the ARQ. 
1581 */ 1582 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 1583 return 0; 1584 } 1585 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 1586 iavf_get_hena(adapter); 1587 return 0; 1588 } 1589 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 1590 iavf_set_hena(adapter); 1591 return 0; 1592 } 1593 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 1594 iavf_set_rss_key(adapter); 1595 return 0; 1596 } 1597 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 1598 iavf_set_rss_lut(adapter); 1599 return 0; 1600 } 1601 1602 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { 1603 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | 1604 FLAG_VF_MULTICAST_PROMISC); 1605 return 0; 1606 } 1607 1608 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { 1609 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); 1610 return 0; 1611 } 1612 1613 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && 1614 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { 1615 iavf_set_promiscuous(adapter, 0); 1616 return 0; 1617 } 1618 1619 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 1620 iavf_enable_channels(adapter); 1621 return 0; 1622 } 1623 1624 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 1625 iavf_disable_channels(adapter); 1626 return 0; 1627 } 1628 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1629 iavf_add_cloud_filter(adapter); 1630 return 0; 1631 } 1632 1633 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1634 iavf_del_cloud_filter(adapter); 1635 return 0; 1636 } 1637 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1638 iavf_del_cloud_filter(adapter); 1639 return 0; 1640 } 1641 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1642 iavf_add_cloud_filter(adapter); 1643 return 0; 1644 } 1645 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) { 1646 iavf_add_fdir_filter(adapter); 1647 return IAVF_SUCCESS; 1648 } 1649 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) { 1650 iavf_del_fdir_filter(adapter); 1651 return IAVF_SUCCESS; 1652 } 1653 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) { 1654 iavf_add_adv_rss_cfg(adapter); 1655 return 0; 1656 } 1657 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) { 1658 iavf_del_adv_rss_cfg(adapter); 1659 return 0; 1660 } 1661 return -EAGAIN; 1662 } 1663 1664 /** 1665 * iavf_startup - first step of driver startup 1666 * @adapter: board private structure 1667 * 1668 * Function process __IAVF_STARTUP driver state. 
1669 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1670 * when fails it returns -EAGAIN 1671 **/ 1672 static int iavf_startup(struct iavf_adapter *adapter) 1673 { 1674 struct pci_dev *pdev = adapter->pdev; 1675 struct iavf_hw *hw = &adapter->hw; 1676 int err; 1677 1678 WARN_ON(adapter->state != __IAVF_STARTUP); 1679 1680 /* driver loaded, probe complete */ 1681 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1682 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1683 err = iavf_set_mac_type(hw); 1684 if (err) { 1685 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1686 goto err; 1687 } 1688 1689 err = iavf_check_reset_complete(hw); 1690 if (err) { 1691 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1692 err); 1693 goto err; 1694 } 1695 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1696 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1697 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1698 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1699 1700 err = iavf_init_adminq(hw); 1701 if (err) { 1702 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1703 goto err; 1704 } 1705 err = iavf_send_api_ver(adapter); 1706 if (err) { 1707 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1708 iavf_shutdown_adminq(hw); 1709 goto err; 1710 } 1711 adapter->state = __IAVF_INIT_VERSION_CHECK; 1712 err: 1713 return err; 1714 } 1715 1716 /** 1717 * iavf_init_version_check - second step of driver startup 1718 * @adapter: board private structure 1719 * 1720 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1721 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1722 * when fails it returns -EAGAIN 1723 **/ 1724 static int iavf_init_version_check(struct iavf_adapter *adapter) 1725 { 1726 struct pci_dev *pdev = adapter->pdev; 1727 struct iavf_hw *hw = &adapter->hw; 1728 int err = -EAGAIN; 1729 1730 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1731 1732 if (!iavf_asq_done(hw)) { 1733 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1734 iavf_shutdown_adminq(hw); 1735 adapter->state = __IAVF_STARTUP; 1736 goto err; 1737 } 1738 1739 /* aq msg sent, awaiting reply */ 1740 err = iavf_verify_api_ver(adapter); 1741 if (err) { 1742 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1743 err = iavf_send_api_ver(adapter); 1744 else 1745 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1746 adapter->pf_version.major, 1747 adapter->pf_version.minor, 1748 VIRTCHNL_VERSION_MAJOR, 1749 VIRTCHNL_VERSION_MINOR); 1750 goto err; 1751 } 1752 err = iavf_send_vf_config_msg(adapter); 1753 if (err) { 1754 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1755 err); 1756 goto err; 1757 } 1758 adapter->state = __IAVF_INIT_GET_RESOURCES; 1759 1760 err: 1761 return err; 1762 } 1763 1764 /** 1765 * iavf_init_get_resources - third step of driver startup 1766 * @adapter: board private structure 1767 * 1768 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1769 * finishes driver initialization procedure. 
1770 * When success the state is changed to __IAVF_DOWN 1771 * when fails it returns -EAGAIN 1772 **/ 1773 static int iavf_init_get_resources(struct iavf_adapter *adapter) 1774 { 1775 struct net_device *netdev = adapter->netdev; 1776 struct pci_dev *pdev = adapter->pdev; 1777 struct iavf_hw *hw = &adapter->hw; 1778 int err; 1779 1780 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1781 /* aq msg sent, awaiting reply */ 1782 if (!adapter->vf_res) { 1783 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1784 GFP_KERNEL); 1785 if (!adapter->vf_res) { 1786 err = -ENOMEM; 1787 goto err; 1788 } 1789 } 1790 err = iavf_get_vf_config(adapter); 1791 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1792 err = iavf_send_vf_config_msg(adapter); 1793 goto err; 1794 } else if (err == IAVF_ERR_PARAM) { 1795 /* We only get ERR_PARAM if the device is in a very bad 1796 * state or if we've been disabled for previous bad 1797 * behavior. Either way, we're done now. 1798 */ 1799 iavf_shutdown_adminq(hw); 1800 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1801 return 0; 1802 } 1803 if (err) { 1804 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1805 goto err_alloc; 1806 } 1807 1808 err = iavf_process_config(adapter); 1809 if (err) 1810 goto err_alloc; 1811 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1812 1813 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1814 1815 netdev->netdev_ops = &iavf_netdev_ops; 1816 iavf_set_ethtool_ops(netdev); 1817 netdev->watchdog_timeo = 5 * HZ; 1818 1819 /* MTU range: 68 - 9710 */ 1820 netdev->min_mtu = ETH_MIN_MTU; 1821 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1822 1823 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1824 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1825 adapter->hw.mac.addr); 1826 eth_hw_addr_random(netdev); 1827 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1828 } else { 1829 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1830 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1831 } 1832 1833 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1834 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1835 err = iavf_init_interrupt_scheme(adapter); 1836 if (err) 1837 goto err_sw_init; 1838 iavf_map_rings_to_vectors(adapter); 1839 if (adapter->vf_res->vf_cap_flags & 1840 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1841 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1842 1843 err = iavf_request_misc_irq(adapter); 1844 if (err) 1845 goto err_sw_init; 1846 1847 netif_carrier_off(netdev); 1848 adapter->link_up = false; 1849 1850 /* set the semaphore to prevent any callbacks after device registration 1851 * up to time when state of driver will be set to __IAVF_DOWN 1852 */ 1853 rtnl_lock(); 1854 if (!adapter->netdev_registered) { 1855 err = register_netdevice(netdev); 1856 if (err) { 1857 rtnl_unlock(); 1858 goto err_register; 1859 } 1860 } 1861 1862 adapter->netdev_registered = true; 1863 1864 netif_tx_stop_all_queues(netdev); 1865 if (CLIENT_ALLOWED(adapter)) { 1866 err = iavf_lan_add_device(adapter); 1867 if (err) 1868 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1869 err); 1870 } 1871 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1872 if (netdev->features & NETIF_F_GRO) 1873 dev_info(&pdev->dev, "GRO is enabled\n"); 1874 1875 adapter->state = __IAVF_DOWN; 1876 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1877 rtnl_unlock(); 1878 1879 iavf_misc_irq_enable(adapter); 1880 wake_up(&adapter->down_waitqueue); 1881 1882 
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1883 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1884 if (!adapter->rss_key || !adapter->rss_lut) { 1885 err = -ENOMEM; 1886 goto err_mem; 1887 } 1888 if (RSS_AQ(adapter)) 1889 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1890 else 1891 iavf_init_rss(adapter); 1892 1893 return err; 1894 err_mem: 1895 iavf_free_rss(adapter); 1896 err_register: 1897 iavf_free_misc_irq(adapter); 1898 err_sw_init: 1899 iavf_reset_interrupt_capability(adapter); 1900 err_alloc: 1901 kfree(adapter->vf_res); 1902 adapter->vf_res = NULL; 1903 err: 1904 return err; 1905 } 1906 1907 /** 1908 * iavf_watchdog_task - Periodic call-back task 1909 * @work: pointer to work_struct 1910 **/ 1911 static void iavf_watchdog_task(struct work_struct *work) 1912 { 1913 struct iavf_adapter *adapter = container_of(work, 1914 struct iavf_adapter, 1915 watchdog_task.work); 1916 struct iavf_hw *hw = &adapter->hw; 1917 u32 reg_val; 1918 1919 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1920 goto restart_watchdog; 1921 1922 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1923 adapter->state = __IAVF_COMM_FAILED; 1924 1925 switch (adapter->state) { 1926 case __IAVF_COMM_FAILED: 1927 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1928 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1929 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1930 reg_val == VIRTCHNL_VFR_COMPLETED) { 1931 /* A chance for redemption! */ 1932 dev_err(&adapter->pdev->dev, 1933 "Hardware came out of reset. Attempting reinit.\n"); 1934 adapter->state = __IAVF_STARTUP; 1935 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1936 queue_delayed_work(iavf_wq, &adapter->init_task, 10); 1937 clear_bit(__IAVF_IN_CRITICAL_TASK, 1938 &adapter->crit_section); 1939 /* Don't reschedule the watchdog, since we've restarted 1940 * the init task. When init_task contacts the PF and 1941 * gets everything set up again, it'll restart the 1942 * watchdog for us. Down, boy. Sit. Stay. Woof. 
1943 */ 1944 return; 1945 } 1946 adapter->aq_required = 0; 1947 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1948 clear_bit(__IAVF_IN_CRITICAL_TASK, 1949 &adapter->crit_section); 1950 queue_delayed_work(iavf_wq, 1951 &adapter->watchdog_task, 1952 msecs_to_jiffies(10)); 1953 goto watchdog_done; 1954 case __IAVF_RESETTING: 1955 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1956 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1957 return; 1958 case __IAVF_DOWN: 1959 case __IAVF_DOWN_PENDING: 1960 case __IAVF_TESTING: 1961 case __IAVF_RUNNING: 1962 if (adapter->current_op) { 1963 if (!iavf_asq_done(hw)) { 1964 dev_dbg(&adapter->pdev->dev, 1965 "Admin queue timeout\n"); 1966 iavf_send_api_ver(adapter); 1967 } 1968 } else { 1969 /* An error will be returned if no commands were 1970 * processed; use this opportunity to update stats 1971 */ 1972 if (iavf_process_aq_command(adapter) && 1973 adapter->state == __IAVF_RUNNING) 1974 iavf_request_stats(adapter); 1975 } 1976 break; 1977 case __IAVF_REMOVE: 1978 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1979 return; 1980 default: 1981 goto restart_watchdog; 1982 } 1983 1984 /* check for hw reset */ 1985 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 1986 if (!reg_val) { 1987 adapter->state = __IAVF_RESETTING; 1988 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1989 adapter->aq_required = 0; 1990 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1991 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1992 queue_work(iavf_wq, &adapter->reset_task); 1993 goto watchdog_done; 1994 } 1995 1996 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 1997 watchdog_done: 1998 if (adapter->state == __IAVF_RUNNING || 1999 adapter->state == __IAVF_COMM_FAILED) 2000 iavf_detect_recover_hung(&adapter->vsi); 2001 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2002 restart_watchdog: 2003 if (adapter->aq_required) 2004 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2005 msecs_to_jiffies(20)); 2006 else 2007 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2008 queue_work(iavf_wq, &adapter->adminq_task); 2009 } 2010 2011 static void iavf_disable_vf(struct iavf_adapter *adapter) 2012 { 2013 struct iavf_mac_filter *f, *ftmp; 2014 struct iavf_vlan_filter *fv, *fvtmp; 2015 struct iavf_cloud_filter *cf, *cftmp; 2016 2017 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2018 2019 /* We don't use netif_running() because it may be true prior to 2020 * ndo_open() returning, so we can't assume it means all our open 2021 * tasks have finished, since we're not holding the rtnl_lock here. 
2022 */ 2023 if (adapter->state == __IAVF_RUNNING) { 2024 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2025 netif_carrier_off(adapter->netdev); 2026 netif_tx_disable(adapter->netdev); 2027 adapter->link_up = false; 2028 iavf_napi_disable_all(adapter); 2029 iavf_irq_disable(adapter); 2030 iavf_free_traffic_irqs(adapter); 2031 iavf_free_all_tx_resources(adapter); 2032 iavf_free_all_rx_resources(adapter); 2033 } 2034 2035 spin_lock_bh(&adapter->mac_vlan_list_lock); 2036 2037 /* Delete all of the filters */ 2038 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2039 list_del(&f->list); 2040 kfree(f); 2041 } 2042 2043 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2044 list_del(&fv->list); 2045 kfree(fv); 2046 } 2047 2048 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2049 2050 spin_lock_bh(&adapter->cloud_filter_list_lock); 2051 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2052 list_del(&cf->list); 2053 kfree(cf); 2054 adapter->num_cloud_filters--; 2055 } 2056 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2057 2058 iavf_free_misc_irq(adapter); 2059 iavf_reset_interrupt_capability(adapter); 2060 iavf_free_queues(adapter); 2061 iavf_free_q_vectors(adapter); 2062 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2063 iavf_shutdown_adminq(&adapter->hw); 2064 adapter->netdev->flags &= ~IFF_UP; 2065 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2066 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2067 adapter->state = __IAVF_DOWN; 2068 wake_up(&adapter->down_waitqueue); 2069 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2070 } 2071 2072 /** 2073 * iavf_reset_task - Call-back task to handle hardware reset 2074 * @work: pointer to work_struct 2075 * 2076 * During reset we need to shut down and reinitialize the admin queue 2077 * before we can use it to communicate with the PF again. We also clear 2078 * and reinit the rings because that context is lost as well. 2079 **/ 2080 static void iavf_reset_task(struct work_struct *work) 2081 { 2082 struct iavf_adapter *adapter = container_of(work, 2083 struct iavf_adapter, 2084 reset_task); 2085 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2086 struct net_device *netdev = adapter->netdev; 2087 struct iavf_hw *hw = &adapter->hw; 2088 struct iavf_mac_filter *f, *ftmp; 2089 struct iavf_vlan_filter *vlf; 2090 struct iavf_cloud_filter *cf; 2091 u32 reg_val; 2092 int i = 0, err; 2093 bool running; 2094 2095 /* When device is being removed it doesn't make sense to run the reset 2096 * task, just return in such a case. 2097 */ 2098 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2099 return; 2100 2101 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2102 &adapter->crit_section)) 2103 usleep_range(500, 1000); 2104 if (CLIENT_ENABLED(adapter)) { 2105 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2106 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2107 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2108 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2109 cancel_delayed_work_sync(&adapter->client_task); 2110 iavf_notify_client_close(&adapter->vsi, true); 2111 } 2112 iavf_misc_irq_disable(adapter); 2113 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2114 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2115 /* Restart the AQ here. If we have been reset but didn't 2116 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2117 */ 2118 iavf_shutdown_adminq(hw); 2119 iavf_init_adminq(hw); 2120 iavf_request_reset(adapter); 2121 } 2122 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2123 2124 /* poll until we see the reset actually happen */ 2125 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2126 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2127 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2128 if (!reg_val) 2129 break; 2130 usleep_range(5000, 10000); 2131 } 2132 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2133 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2134 goto continue_reset; /* act like the reset happened */ 2135 } 2136 2137 /* wait until the reset is complete and the PF is responding to us */ 2138 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2139 /* sleep first to make sure a minimum wait time is met */ 2140 msleep(IAVF_RESET_WAIT_MS); 2141 2142 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2143 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2144 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2145 break; 2146 } 2147 2148 pci_set_master(adapter->pdev); 2149 2150 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2151 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2152 reg_val); 2153 iavf_disable_vf(adapter); 2154 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2155 return; /* Do not attempt to reinit. It's dead, Jim. */ 2156 } 2157 2158 continue_reset: 2159 /* We don't use netif_running() because it may be true prior to 2160 * ndo_open() returning, so we can't assume it means all our open 2161 * tasks have finished, since we're not holding the rtnl_lock here. 2162 */ 2163 running = ((adapter->state == __IAVF_RUNNING) || 2164 (adapter->state == __IAVF_RESETTING)); 2165 2166 if (running) { 2167 netif_carrier_off(netdev); 2168 netif_tx_stop_all_queues(netdev); 2169 adapter->link_up = false; 2170 iavf_napi_disable_all(adapter); 2171 } 2172 iavf_irq_disable(adapter); 2173 2174 adapter->state = __IAVF_RESETTING; 2175 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2176 2177 /* free the Tx/Rx rings and descriptors, might be better to just 2178 * re-use them sometime in the future 2179 */ 2180 iavf_free_all_rx_resources(adapter); 2181 iavf_free_all_tx_resources(adapter); 2182 2183 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2184 /* kill and reinit the admin queue */ 2185 iavf_shutdown_adminq(hw); 2186 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2187 err = iavf_init_adminq(hw); 2188 if (err) 2189 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2190 err); 2191 adapter->aq_required = 0; 2192 2193 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2194 err = iavf_reinit_interrupt_scheme(adapter); 2195 if (err) 2196 goto reset_err; 2197 } 2198 2199 if (RSS_AQ(adapter)) { 2200 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2201 } else { 2202 err = iavf_init_rss(adapter); 2203 if (err) 2204 goto reset_err; 2205 } 2206 2207 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2208 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2209 2210 spin_lock_bh(&adapter->mac_vlan_list_lock); 2211 2212 /* Delete filter for the current MAC address, it could have 2213 * been changed by the PF via administratively set MAC. 2214 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2215 */ 2216 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2217 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2218 list_del(&f->list); 2219 kfree(f); 2220 } 2221 } 2222 /* re-add all MAC filters */ 2223 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2224 f->add = true; 2225 } 2226 /* re-add all VLAN filters */ 2227 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2228 vlf->add = true; 2229 } 2230 2231 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2232 2233 /* check if TCs are running and re-add all cloud filters */ 2234 spin_lock_bh(&adapter->cloud_filter_list_lock); 2235 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2236 adapter->num_tc) { 2237 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2238 cf->add = true; 2239 } 2240 } 2241 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2242 2243 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2244 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2245 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2246 iavf_misc_irq_enable(adapter); 2247 2248 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2249 2250 /* We were running when the reset started, so we need to restore some 2251 * state here. 2252 */ 2253 if (running) { 2254 /* allocate transmit descriptors */ 2255 err = iavf_setup_all_tx_resources(adapter); 2256 if (err) 2257 goto reset_err; 2258 2259 /* allocate receive descriptors */ 2260 err = iavf_setup_all_rx_resources(adapter); 2261 if (err) 2262 goto reset_err; 2263 2264 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2265 err = iavf_request_traffic_irqs(adapter, netdev->name); 2266 if (err) 2267 goto reset_err; 2268 2269 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2270 } 2271 2272 iavf_configure(adapter); 2273 2274 iavf_up_complete(adapter); 2275 2276 iavf_irq_enable(adapter, true); 2277 } else { 2278 adapter->state = __IAVF_DOWN; 2279 wake_up(&adapter->down_waitqueue); 2280 } 2281 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2282 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2283 2284 return; 2285 reset_err: 2286 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2287 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2288 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2289 iavf_close(netdev); 2290 } 2291 2292 /** 2293 * iavf_adminq_task - worker thread to clean the admin queue 2294 * @work: pointer to work_struct containing our data 2295 **/ 2296 static void iavf_adminq_task(struct work_struct *work) 2297 { 2298 struct iavf_adapter *adapter = 2299 container_of(work, struct iavf_adapter, adminq_task); 2300 struct iavf_hw *hw = &adapter->hw; 2301 struct iavf_arq_event_info event; 2302 enum virtchnl_ops v_op; 2303 enum iavf_status ret, v_ret; 2304 u32 val, oldval; 2305 u16 pending; 2306 2307 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2308 goto out; 2309 2310 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2311 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2312 if (!event.msg_buf) 2313 goto out; 2314 2315 do { 2316 ret = iavf_clean_arq_element(hw, &event, &pending); 2317 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2318 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2319 2320 if (ret || !v_op) 2321 break; /* No event to process or error cleaning ARQ */ 2322 2323 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2324 event.msg_len); 2325 if (pending != 0) 2326 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2327 } 
while (pending); 2328 2329 if ((adapter->flags & 2330 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2331 adapter->state == __IAVF_RESETTING) 2332 goto freedom; 2333 2334 /* check for error indications */ 2335 val = rd32(hw, hw->aq.arq.len); 2336 if (val == 0xdeadbeef) /* indicates device in reset */ 2337 goto freedom; 2338 oldval = val; 2339 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2340 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2341 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2342 } 2343 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2344 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2345 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2346 } 2347 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2348 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2349 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2350 } 2351 if (oldval != val) 2352 wr32(hw, hw->aq.arq.len, val); 2353 2354 val = rd32(hw, hw->aq.asq.len); 2355 oldval = val; 2356 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2357 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2358 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2359 } 2360 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2361 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2362 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2363 } 2364 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2365 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2366 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2367 } 2368 if (oldval != val) 2369 wr32(hw, hw->aq.asq.len, val); 2370 2371 freedom: 2372 kfree(event.msg_buf); 2373 out: 2374 /* re-enable Admin queue interrupt cause */ 2375 iavf_misc_irq_enable(adapter); 2376 } 2377 2378 /** 2379 * iavf_client_task - worker thread to perform client work 2380 * @work: pointer to work_struct containing our data 2381 * 2382 * This task handles client interactions. Because client calls can be 2383 * reentrant, we can't handle them in the watchdog. 2384 **/ 2385 static void iavf_client_task(struct work_struct *work) 2386 { 2387 struct iavf_adapter *adapter = 2388 container_of(work, struct iavf_adapter, client_task.work); 2389 2390 /* If we can't get the client bit, just give up. We'll be rescheduled 2391 * later. 
2392 */ 2393 2394 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) 2395 return; 2396 2397 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2398 iavf_client_subtask(adapter); 2399 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2400 goto out; 2401 } 2402 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2403 iavf_notify_client_l2_params(&adapter->vsi); 2404 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2405 goto out; 2406 } 2407 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2408 iavf_notify_client_close(&adapter->vsi, false); 2409 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2410 goto out; 2411 } 2412 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2413 iavf_notify_client_open(&adapter->vsi); 2414 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2415 } 2416 out: 2417 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2418 } 2419 2420 /** 2421 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2422 * @adapter: board private structure 2423 * 2424 * Free all transmit software resources 2425 **/ 2426 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2427 { 2428 int i; 2429 2430 if (!adapter->tx_rings) 2431 return; 2432 2433 for (i = 0; i < adapter->num_active_queues; i++) 2434 if (adapter->tx_rings[i].desc) 2435 iavf_free_tx_resources(&adapter->tx_rings[i]); 2436 } 2437 2438 /** 2439 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2440 * @adapter: board private structure 2441 * 2442 * If this function returns with an error, then it's possible one or 2443 * more of the rings is populated (while the rest are not). It is the 2444 * callers duty to clean those orphaned rings. 2445 * 2446 * Return 0 on success, negative on failure 2447 **/ 2448 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2449 { 2450 int i, err = 0; 2451 2452 for (i = 0; i < adapter->num_active_queues; i++) { 2453 adapter->tx_rings[i].count = adapter->tx_desc_count; 2454 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2455 if (!err) 2456 continue; 2457 dev_err(&adapter->pdev->dev, 2458 "Allocation for Tx Queue %u failed\n", i); 2459 break; 2460 } 2461 2462 return err; 2463 } 2464 2465 /** 2466 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2467 * @adapter: board private structure 2468 * 2469 * If this function returns with an error, then it's possible one or 2470 * more of the rings is populated (while the rest are not). It is the 2471 * callers duty to clean those orphaned rings. 
2472 * 2473 * Return 0 on success, negative on failure 2474 **/ 2475 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2476 { 2477 int i, err = 0; 2478 2479 for (i = 0; i < adapter->num_active_queues; i++) { 2480 adapter->rx_rings[i].count = adapter->rx_desc_count; 2481 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2482 if (!err) 2483 continue; 2484 dev_err(&adapter->pdev->dev, 2485 "Allocation for Rx Queue %u failed\n", i); 2486 break; 2487 } 2488 return err; 2489 } 2490 2491 /** 2492 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2493 * @adapter: board private structure 2494 * 2495 * Free all receive software resources 2496 **/ 2497 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2498 { 2499 int i; 2500 2501 if (!adapter->rx_rings) 2502 return; 2503 2504 for (i = 0; i < adapter->num_active_queues; i++) 2505 if (adapter->rx_rings[i].desc) 2506 iavf_free_rx_resources(&adapter->rx_rings[i]); 2507 } 2508 2509 /** 2510 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2511 * @adapter: board private structure 2512 * @max_tx_rate: max Tx bw for a tc 2513 **/ 2514 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2515 u64 max_tx_rate) 2516 { 2517 int speed = 0, ret = 0; 2518 2519 if (ADV_LINK_SUPPORT(adapter)) { 2520 if (adapter->link_speed_mbps < U32_MAX) { 2521 speed = adapter->link_speed_mbps; 2522 goto validate_bw; 2523 } else { 2524 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2525 return -EINVAL; 2526 } 2527 } 2528 2529 switch (adapter->link_speed) { 2530 case VIRTCHNL_LINK_SPEED_40GB: 2531 speed = SPEED_40000; 2532 break; 2533 case VIRTCHNL_LINK_SPEED_25GB: 2534 speed = SPEED_25000; 2535 break; 2536 case VIRTCHNL_LINK_SPEED_20GB: 2537 speed = SPEED_20000; 2538 break; 2539 case VIRTCHNL_LINK_SPEED_10GB: 2540 speed = SPEED_10000; 2541 break; 2542 case VIRTCHNL_LINK_SPEED_5GB: 2543 speed = SPEED_5000; 2544 break; 2545 case VIRTCHNL_LINK_SPEED_2_5GB: 2546 speed = SPEED_2500; 2547 break; 2548 case VIRTCHNL_LINK_SPEED_1GB: 2549 speed = SPEED_1000; 2550 break; 2551 case VIRTCHNL_LINK_SPEED_100MB: 2552 speed = SPEED_100; 2553 break; 2554 default: 2555 break; 2556 } 2557 2558 validate_bw: 2559 if (max_tx_rate > speed) { 2560 dev_err(&adapter->pdev->dev, 2561 "Invalid tx rate specified\n"); 2562 ret = -EINVAL; 2563 } 2564 2565 return ret; 2566 } 2567 2568 /** 2569 * iavf_validate_ch_config - validate queue mapping info 2570 * @adapter: board private structure 2571 * @mqprio_qopt: queue parameters 2572 * 2573 * This function validates if the config provided by the user to 2574 * configure queue channels is valid or not. Returns 0 on a valid 2575 * config. 
2576 **/ 2577 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2578 struct tc_mqprio_qopt_offload *mqprio_qopt) 2579 { 2580 u64 total_max_rate = 0; 2581 int i, num_qps = 0; 2582 u64 tx_rate = 0; 2583 int ret = 0; 2584 2585 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2586 mqprio_qopt->qopt.num_tc < 1) 2587 return -EINVAL; 2588 2589 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2590 if (!mqprio_qopt->qopt.count[i] || 2591 mqprio_qopt->qopt.offset[i] != num_qps) 2592 return -EINVAL; 2593 if (mqprio_qopt->min_rate[i]) { 2594 dev_err(&adapter->pdev->dev, 2595 "Invalid min tx rate (greater than 0) specified\n"); 2596 return -EINVAL; 2597 } 2598 /*convert to Mbps */ 2599 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2600 IAVF_MBPS_DIVISOR); 2601 total_max_rate += tx_rate; 2602 num_qps += mqprio_qopt->qopt.count[i]; 2603 } 2604 if (num_qps > IAVF_MAX_REQ_QUEUES) 2605 return -EINVAL; 2606 2607 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2608 return ret; 2609 } 2610 2611 /** 2612 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2613 * @adapter: board private structure 2614 **/ 2615 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2616 { 2617 struct iavf_cloud_filter *cf, *cftmp; 2618 2619 spin_lock_bh(&adapter->cloud_filter_list_lock); 2620 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2621 list) { 2622 list_del(&cf->list); 2623 kfree(cf); 2624 adapter->num_cloud_filters--; 2625 } 2626 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2627 } 2628 2629 /** 2630 * __iavf_setup_tc - configure multiple traffic classes 2631 * @netdev: network interface device structure 2632 * @type_data: tc offload data 2633 * 2634 * This function processes the config information provided by the 2635 * user to configure traffic classes/queue channels and packages the 2636 * information to request the PF to setup traffic classes. 2637 * 2638 * Returns 0 on success. 
2639 **/ 2640 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2641 { 2642 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2643 struct iavf_adapter *adapter = netdev_priv(netdev); 2644 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2645 u8 num_tc = 0, total_qps = 0; 2646 int ret = 0, netdev_tc = 0; 2647 u64 max_tx_rate; 2648 u16 mode; 2649 int i; 2650 2651 num_tc = mqprio_qopt->qopt.num_tc; 2652 mode = mqprio_qopt->mode; 2653 2654 /* delete queue_channel */ 2655 if (!mqprio_qopt->qopt.hw) { 2656 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2657 /* reset the tc configuration */ 2658 netdev_reset_tc(netdev); 2659 adapter->num_tc = 0; 2660 netif_tx_stop_all_queues(netdev); 2661 netif_tx_disable(netdev); 2662 iavf_del_all_cloud_filters(adapter); 2663 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2664 goto exit; 2665 } else { 2666 return -EINVAL; 2667 } 2668 } 2669 2670 /* add queue channel */ 2671 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2672 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2673 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2674 return -EOPNOTSUPP; 2675 } 2676 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2677 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2678 return -EINVAL; 2679 } 2680 2681 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2682 if (ret) 2683 return ret; 2684 /* Return if same TC config is requested */ 2685 if (adapter->num_tc == num_tc) 2686 return 0; 2687 adapter->num_tc = num_tc; 2688 2689 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2690 if (i < num_tc) { 2691 adapter->ch_config.ch_info[i].count = 2692 mqprio_qopt->qopt.count[i]; 2693 adapter->ch_config.ch_info[i].offset = 2694 mqprio_qopt->qopt.offset[i]; 2695 total_qps += mqprio_qopt->qopt.count[i]; 2696 max_tx_rate = mqprio_qopt->max_rate[i]; 2697 /* convert to Mbps */ 2698 max_tx_rate = div_u64(max_tx_rate, 2699 IAVF_MBPS_DIVISOR); 2700 adapter->ch_config.ch_info[i].max_tx_rate = 2701 max_tx_rate; 2702 } else { 2703 adapter->ch_config.ch_info[i].count = 1; 2704 adapter->ch_config.ch_info[i].offset = 0; 2705 } 2706 } 2707 adapter->ch_config.total_qps = total_qps; 2708 netif_tx_stop_all_queues(netdev); 2709 netif_tx_disable(netdev); 2710 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2711 netdev_reset_tc(netdev); 2712 /* Report the tc mapping up the stack */ 2713 netdev_set_num_tc(adapter->netdev, num_tc); 2714 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2715 u16 qcount = mqprio_qopt->qopt.count[i]; 2716 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2717 2718 if (i < num_tc) 2719 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2720 qoffset); 2721 } 2722 } 2723 exit: 2724 return ret; 2725 } 2726 2727 /** 2728 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2729 * @adapter: board private structure 2730 * @f: pointer to struct flow_cls_offload 2731 * @filter: pointer to cloud filter structure 2732 */ 2733 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2734 struct flow_cls_offload *f, 2735 struct iavf_cloud_filter *filter) 2736 { 2737 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2738 struct flow_dissector *dissector = rule->match.dissector; 2739 u16 n_proto_mask = 0; 2740 u16 n_proto_key = 0; 2741 u8 field_flags = 0; 2742 u16 addr_type = 0; 2743 u16 n_proto = 0; 2744 int i = 0; 2745 struct virtchnl_filter *vf = &filter->f; 2746 2747 if (dissector->used_keys & 2748 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2749 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2750 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2751 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2752 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2753 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2754 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2755 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2756 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2757 dissector->used_keys); 2758 return -EOPNOTSUPP; 2759 } 2760 2761 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2762 struct flow_match_enc_keyid match; 2763 2764 flow_rule_match_enc_keyid(rule, &match); 2765 if (match.mask->keyid != 0) 2766 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2767 } 2768 2769 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2770 struct flow_match_basic match; 2771 2772 flow_rule_match_basic(rule, &match); 2773 n_proto_key = ntohs(match.key->n_proto); 2774 n_proto_mask = ntohs(match.mask->n_proto); 2775 2776 if (n_proto_key == ETH_P_ALL) { 2777 n_proto_key = 0; 2778 n_proto_mask = 0; 2779 } 2780 n_proto = n_proto_key & n_proto_mask; 2781 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2782 return -EINVAL; 2783 if (n_proto == ETH_P_IPV6) { 2784 /* specify flow type as TCP IPv6 */ 2785 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2786 } 2787 2788 if (match.key->ip_proto != IPPROTO_TCP) { 2789 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2790 return -EINVAL; 2791 } 2792 } 2793 2794 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2795 struct flow_match_eth_addrs match; 2796 2797 flow_rule_match_eth_addrs(rule, &match); 2798 2799 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2800 if (!is_zero_ether_addr(match.mask->dst)) { 2801 if (is_broadcast_ether_addr(match.mask->dst)) { 2802 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2803 } else { 2804 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2805 match.mask->dst); 2806 return IAVF_ERR_CONFIG; 2807 } 2808 } 2809 2810 if (!is_zero_ether_addr(match.mask->src)) { 2811 if (is_broadcast_ether_addr(match.mask->src)) { 2812 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2813 } else { 2814 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2815 match.mask->src); 2816 return IAVF_ERR_CONFIG; 2817 } 2818 } 2819 2820 if (!is_zero_ether_addr(match.key->dst)) 2821 if (is_valid_ether_addr(match.key->dst) || 2822 is_multicast_ether_addr(match.key->dst)) { 2823 /* set the mask if a valid dst_mac address */ 2824 for (i = 0; i < ETH_ALEN; i++) 2825 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2826 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2827 match.key->dst); 2828 } 2829 2830 if (!is_zero_ether_addr(match.key->src)) 2831 if (is_valid_ether_addr(match.key->src) || 2832 is_multicast_ether_addr(match.key->src)) { 2833 /* set the mask if a valid dst_mac address */ 2834 for (i = 0; i < ETH_ALEN; i++) 2835 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2836 ether_addr_copy(vf->data.tcp_spec.src_mac, 2837 match.key->src); 2838 } 2839 } 2840 2841 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2842 struct flow_match_vlan match; 2843 2844 flow_rule_match_vlan(rule, &match); 2845 if (match.mask->vlan_id) { 2846 if (match.mask->vlan_id == VLAN_VID_MASK) { 2847 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2848 } else { 2849 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2850 match.mask->vlan_id); 2851 return IAVF_ERR_CONFIG; 2852 } 2853 } 2854 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2855 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2856 } 2857 2858 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2859 struct flow_match_control match; 2860 2861 
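/* The control key only carries the address family being matched;
 * addr_type selects the IPv4 or IPv6 parsing further below.
 */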
flow_rule_match_control(rule, &match);
2862 addr_type = match.key->addr_type;
2863 }
2864
2865 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2866 struct flow_match_ipv4_addrs match;
2867
2868 flow_rule_match_ipv4_addrs(rule, &match);
2869 if (match.mask->dst) {
2870 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2871 field_flags |= IAVF_CLOUD_FIELD_IIP;
2872 } else {
2873 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2874 be32_to_cpu(match.mask->dst));
2875 return IAVF_ERR_CONFIG;
2876 }
2877 }
2878
2879 if (match.mask->src) {
2880 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2881 field_flags |= IAVF_CLOUD_FIELD_IIP;
2882 } else {
2883 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2884 be32_to_cpu(match.mask->src));
2885 return IAVF_ERR_CONFIG;
2886 }
2887 }
2888
2889 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2890 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2891 return IAVF_ERR_CONFIG;
2892 }
2893 if (match.key->dst) {
2894 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2895 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2896 }
2897 if (match.key->src) {
2898 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2899 vf->data.tcp_spec.src_ip[0] = match.key->src;
2900 }
2901 }
2902
2903 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2904 struct flow_match_ipv6_addrs match;
2905
2906 flow_rule_match_ipv6_addrs(rule, &match);
2907
2908 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2909 if (ipv6_addr_any(&match.mask->dst)) {
2910 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2911 IPV6_ADDR_ANY);
2912 return IAVF_ERR_CONFIG;
2913 }
2914
2915 /* src and dest IPv6 address should not be LOOPBACK
2916 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2917 */
2918 if (ipv6_addr_loopback(&match.key->dst) ||
2919 ipv6_addr_loopback(&match.key->src)) {
2920 dev_err(&adapter->pdev->dev,
2921 "ipv6 addr should not be loopback\n");
2922 return IAVF_ERR_CONFIG;
2923 }
2924 if (!ipv6_addr_any(&match.mask->dst) ||
2925 !ipv6_addr_any(&match.mask->src))
2926 field_flags |= IAVF_CLOUD_FIELD_IIP;
2927
2928 for (i = 0; i < 4; i++)
2929 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2930 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2931 sizeof(vf->data.tcp_spec.dst_ip));
2932 for (i = 0; i < 4; i++)
2933 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2934 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2935 sizeof(vf->data.tcp_spec.src_ip));
2936 }
2937 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2938 struct flow_match_ports match;
2939
2940 flow_rule_match_ports(rule, &match);
2941 if (match.mask->src) {
2942 if (match.mask->src == cpu_to_be16(0xffff)) {
2943 field_flags |= IAVF_CLOUD_FIELD_IIP;
2944 } else {
2945 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2946 be16_to_cpu(match.mask->src));
2947 return IAVF_ERR_CONFIG;
2948 }
2949 }
2950
2951 if (match.mask->dst) {
2952 if (match.mask->dst == cpu_to_be16(0xffff)) {
2953 field_flags |= IAVF_CLOUD_FIELD_IIP;
2954 } else {
2955 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2956 be16_to_cpu(match.mask->dst));
2957 return IAVF_ERR_CONFIG;
2958 }
2959 }
2960 if (match.key->dst) {
2961 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2962 vf->data.tcp_spec.dst_port = match.key->dst;
2963 }
2964
2965 if (match.key->src) {
2966 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2967 vf->data.tcp_spec.src_port = match.key->src;
2968 }
2969 }
2970 vf->field_flags = field_flags;
2971
2972
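/* At this point every recognized match field has been copied into the
 * virtchnl filter; the caller sets the action (see iavf_handle_tclass())
 * before the filter is queued for the PF.
 *
 * Illustrative only: a flower rule of the form accepted here, assuming a
 * VF netdev named eth0 with ADq traffic classes already configured, would
 * look roughly like:
 *   tc filter add dev eth0 protocol ip ingress flower \
 *      ip_proto tcp dst_ip 192.168.1.10 dst_port 80 skip_sw hw_tc 1
 */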
return 0; 2973 } 2974 2975 /** 2976 * iavf_handle_tclass - Forward to a traffic class on the device 2977 * @adapter: board private structure 2978 * @tc: traffic class index on the device 2979 * @filter: pointer to cloud filter structure 2980 */ 2981 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 2982 struct iavf_cloud_filter *filter) 2983 { 2984 if (tc == 0) 2985 return 0; 2986 if (tc < adapter->num_tc) { 2987 if (!filter->f.data.tcp_spec.dst_port) { 2988 dev_err(&adapter->pdev->dev, 2989 "Specify destination port to redirect to traffic class other than TC0\n"); 2990 return -EINVAL; 2991 } 2992 } 2993 /* redirect to a traffic class on the same device */ 2994 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2995 filter->f.action_meta = tc; 2996 return 0; 2997 } 2998 2999 /** 3000 * iavf_configure_clsflower - Add tc flower filters 3001 * @adapter: board private structure 3002 * @cls_flower: Pointer to struct flow_cls_offload 3003 */ 3004 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3005 struct flow_cls_offload *cls_flower) 3006 { 3007 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3008 struct iavf_cloud_filter *filter = NULL; 3009 int err = -EINVAL, count = 50; 3010 3011 if (tc < 0) { 3012 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3013 return -EINVAL; 3014 } 3015 3016 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3017 if (!filter) 3018 return -ENOMEM; 3019 3020 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3021 &adapter->crit_section)) { 3022 if (--count == 0) 3023 goto err; 3024 udelay(1); 3025 } 3026 3027 filter->cookie = cls_flower->cookie; 3028 3029 /* set the mask to all zeroes to begin with */ 3030 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3031 /* start out with flow type and eth type IPv4 to begin with */ 3032 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3033 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3034 if (err < 0) 3035 goto err; 3036 3037 err = iavf_handle_tclass(adapter, tc, filter); 3038 if (err < 0) 3039 goto err; 3040 3041 /* add filter to the list */ 3042 spin_lock_bh(&adapter->cloud_filter_list_lock); 3043 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3044 adapter->num_cloud_filters++; 3045 filter->add = true; 3046 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3047 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3048 err: 3049 if (err) 3050 kfree(filter); 3051 3052 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3053 return err; 3054 } 3055 3056 /* iavf_find_cf - Find the cloud filter in the list 3057 * @adapter: Board private structure 3058 * @cookie: filter specific cookie 3059 * 3060 * Returns ptr to the filter object or NULL. Must be called while holding the 3061 * cloud_filter_list_lock. 
3062 */ 3063 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3064 unsigned long *cookie) 3065 { 3066 struct iavf_cloud_filter *filter = NULL; 3067 3068 if (!cookie) 3069 return NULL; 3070 3071 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3072 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3073 return filter; 3074 } 3075 return NULL; 3076 } 3077 3078 /** 3079 * iavf_delete_clsflower - Remove tc flower filters 3080 * @adapter: board private structure 3081 * @cls_flower: Pointer to struct flow_cls_offload 3082 */ 3083 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3084 struct flow_cls_offload *cls_flower) 3085 { 3086 struct iavf_cloud_filter *filter = NULL; 3087 int err = 0; 3088 3089 spin_lock_bh(&adapter->cloud_filter_list_lock); 3090 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3091 if (filter) { 3092 filter->del = true; 3093 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3094 } else { 3095 err = -EINVAL; 3096 } 3097 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3098 3099 return err; 3100 } 3101 3102 /** 3103 * iavf_setup_tc_cls_flower - flower classifier offloads 3104 * @adapter: board private structure 3105 * @cls_flower: pointer to flow_cls_offload struct with flow info 3106 */ 3107 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3108 struct flow_cls_offload *cls_flower) 3109 { 3110 switch (cls_flower->command) { 3111 case FLOW_CLS_REPLACE: 3112 return iavf_configure_clsflower(adapter, cls_flower); 3113 case FLOW_CLS_DESTROY: 3114 return iavf_delete_clsflower(adapter, cls_flower); 3115 case FLOW_CLS_STATS: 3116 return -EOPNOTSUPP; 3117 default: 3118 return -EOPNOTSUPP; 3119 } 3120 } 3121 3122 /** 3123 * iavf_setup_tc_block_cb - block callback for tc 3124 * @type: type of offload 3125 * @type_data: offload data 3126 * @cb_priv: 3127 * 3128 * This function is the block callback for traffic classes 3129 **/ 3130 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3131 void *cb_priv) 3132 { 3133 struct iavf_adapter *adapter = cb_priv; 3134 3135 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3136 return -EOPNOTSUPP; 3137 3138 switch (type) { 3139 case TC_SETUP_CLSFLOWER: 3140 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3141 default: 3142 return -EOPNOTSUPP; 3143 } 3144 } 3145 3146 static LIST_HEAD(iavf_block_cb_list); 3147 3148 /** 3149 * iavf_setup_tc - configure multiple traffic classes 3150 * @netdev: network interface device structure 3151 * @type: type of offload 3152 * @type_data: tc offload data 3153 * 3154 * This function is the callback to ndo_setup_tc in the 3155 * netdev_ops. 3156 * 3157 * Returns 0 on success 3158 **/ 3159 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3160 void *type_data) 3161 { 3162 struct iavf_adapter *adapter = netdev_priv(netdev); 3163 3164 switch (type) { 3165 case TC_SETUP_QDISC_MQPRIO: 3166 return __iavf_setup_tc(netdev, type_data); 3167 case TC_SETUP_BLOCK: 3168 return flow_block_cb_setup_simple(type_data, 3169 &iavf_block_cb_list, 3170 iavf_setup_tc_block_cb, 3171 adapter, adapter, true); 3172 default: 3173 return -EOPNOTSUPP; 3174 } 3175 } 3176 3177 /** 3178 * iavf_open - Called when a network interface is made active 3179 * @netdev: network interface device structure 3180 * 3181 * Returns 0 on success, negative value on failure 3182 * 3183 * The open entry point is called when a network interface is made 3184 * active by the system (IFF_UP). 
At this point all resources needed 3185 * for transmit and receive operations are allocated, the interrupt 3186 * handler is registered with the OS, the watchdog is started, 3187 * and the stack is notified that the interface is ready. 3188 **/ 3189 static int iavf_open(struct net_device *netdev) 3190 { 3191 struct iavf_adapter *adapter = netdev_priv(netdev); 3192 int err; 3193 3194 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3195 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3196 return -EIO; 3197 } 3198 3199 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3200 &adapter->crit_section)) 3201 usleep_range(500, 1000); 3202 3203 if (adapter->state != __IAVF_DOWN) { 3204 err = -EBUSY; 3205 goto err_unlock; 3206 } 3207 3208 /* allocate transmit descriptors */ 3209 err = iavf_setup_all_tx_resources(adapter); 3210 if (err) 3211 goto err_setup_tx; 3212 3213 /* allocate receive descriptors */ 3214 err = iavf_setup_all_rx_resources(adapter); 3215 if (err) 3216 goto err_setup_rx; 3217 3218 /* clear any pending interrupts, may auto mask */ 3219 err = iavf_request_traffic_irqs(adapter, netdev->name); 3220 if (err) 3221 goto err_req_irq; 3222 3223 spin_lock_bh(&adapter->mac_vlan_list_lock); 3224 3225 iavf_add_filter(adapter, adapter->hw.mac.addr); 3226 3227 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3228 3229 iavf_configure(adapter); 3230 3231 iavf_up_complete(adapter); 3232 3233 iavf_irq_enable(adapter, true); 3234 3235 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3236 3237 return 0; 3238 3239 err_req_irq: 3240 iavf_down(adapter); 3241 iavf_free_traffic_irqs(adapter); 3242 err_setup_rx: 3243 iavf_free_all_rx_resources(adapter); 3244 err_setup_tx: 3245 iavf_free_all_tx_resources(adapter); 3246 err_unlock: 3247 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3248 3249 return err; 3250 } 3251 3252 /** 3253 * iavf_close - Disables a network interface 3254 * @netdev: network interface device structure 3255 * 3256 * Returns 0, this is not allowed to fail 3257 * 3258 * The close entry point is called when an interface is de-activated 3259 * by the OS. The hardware is still under the drivers control, but 3260 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3261 * are freed, along with all transmit and receive resources. 3262 **/ 3263 static int iavf_close(struct net_device *netdev) 3264 { 3265 struct iavf_adapter *adapter = netdev_priv(netdev); 3266 int status; 3267 3268 if (adapter->state <= __IAVF_DOWN_PENDING) 3269 return 0; 3270 3271 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3272 &adapter->crit_section)) 3273 usleep_range(500, 1000); 3274 3275 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3276 if (CLIENT_ENABLED(adapter)) 3277 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3278 3279 iavf_down(adapter); 3280 adapter->state = __IAVF_DOWN_PENDING; 3281 iavf_free_traffic_irqs(adapter); 3282 3283 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3284 3285 /* We explicitly don't free resources here because the hardware is 3286 * still active and can DMA into memory. Resources are cleared in 3287 * iavf_virtchnl_completion() after we get confirmation from the PF 3288 * driver that the rings have been stopped. 3289 * 3290 * Also, we wait for state to transition to __IAVF_DOWN before 3291 * returning. State change occurs in iavf_virtchnl_completion() after 3292 * VF resources are released (which occurs after PF driver processes and 3293 * responds to admin queue commands). 
3294 */ 3295 3296 status = wait_event_timeout(adapter->down_waitqueue, 3297 adapter->state == __IAVF_DOWN, 3298 msecs_to_jiffies(500)); 3299 if (!status) 3300 netdev_warn(netdev, "Device resources not yet released\n"); 3301 return 0; 3302 } 3303 3304 /** 3305 * iavf_change_mtu - Change the Maximum Transfer Unit 3306 * @netdev: network interface device structure 3307 * @new_mtu: new value for maximum frame size 3308 * 3309 * Returns 0 on success, negative on failure 3310 **/ 3311 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3312 { 3313 struct iavf_adapter *adapter = netdev_priv(netdev); 3314 3315 netdev->mtu = new_mtu; 3316 if (CLIENT_ENABLED(adapter)) { 3317 iavf_notify_client_l2_params(&adapter->vsi); 3318 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3319 } 3320 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3321 queue_work(iavf_wq, &adapter->reset_task); 3322 3323 return 0; 3324 } 3325 3326 /** 3327 * iavf_set_features - set the netdev feature flags 3328 * @netdev: ptr to the netdev being adjusted 3329 * @features: the feature set that the stack is suggesting 3330 * Note: expects to be called while under rtnl_lock() 3331 **/ 3332 static int iavf_set_features(struct net_device *netdev, 3333 netdev_features_t features) 3334 { 3335 struct iavf_adapter *adapter = netdev_priv(netdev); 3336 3337 /* Don't allow changing VLAN_RX flag when adapter is not capable 3338 * of VLAN offload 3339 */ 3340 if (!VLAN_ALLOWED(adapter)) { 3341 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3342 return -EINVAL; 3343 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3344 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3345 adapter->aq_required |= 3346 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3347 else 3348 adapter->aq_required |= 3349 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3350 } 3351 3352 return 0; 3353 } 3354 3355 /** 3356 * iavf_features_check - Validate encapsulated packet conforms to limits 3357 * @skb: skb buff 3358 * @dev: This physical port's netdev 3359 * @features: Offload features that the stack believes apply 3360 **/ 3361 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3362 struct net_device *dev, 3363 netdev_features_t features) 3364 { 3365 size_t len; 3366 3367 /* No point in doing any of this if neither checksum nor GSO are 3368 * being requested for this frame. We can rule out both by just 3369 * checking for CHECKSUM_PARTIAL 3370 */ 3371 if (skb->ip_summed != CHECKSUM_PARTIAL) 3372 return features; 3373 3374 /* We cannot support GSO if the MSS is going to be less than 3375 * 64 bytes. If it is then we need to drop support for GSO. 
3376 */ 3377 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3378 features &= ~NETIF_F_GSO_MASK; 3379 3380 /* MACLEN can support at most 63 words */ 3381 len = skb_network_header(skb) - skb->data; 3382 if (len & ~(63 * 2)) 3383 goto out_err; 3384 3385 /* IPLEN and EIPLEN can support at most 127 dwords */ 3386 len = skb_transport_header(skb) - skb_network_header(skb); 3387 if (len & ~(127 * 4)) 3388 goto out_err; 3389 3390 if (skb->encapsulation) { 3391 /* L4TUNLEN can support 127 words */ 3392 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3393 if (len & ~(127 * 2)) 3394 goto out_err; 3395 3396 /* IPLEN can support at most 127 dwords */ 3397 len = skb_inner_transport_header(skb) - 3398 skb_inner_network_header(skb); 3399 if (len & ~(127 * 4)) 3400 goto out_err; 3401 } 3402 3403 /* No need to validate L4LEN as TCP is the only protocol with a 3404 * a flexible value and we support all possible values supported 3405 * by TCP, which is at most 15 dwords 3406 */ 3407 3408 return features; 3409 out_err: 3410 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3411 } 3412 3413 /** 3414 * iavf_fix_features - fix up the netdev feature bits 3415 * @netdev: our net device 3416 * @features: desired feature bits 3417 * 3418 * Returns fixed-up features bits 3419 **/ 3420 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3421 netdev_features_t features) 3422 { 3423 struct iavf_adapter *adapter = netdev_priv(netdev); 3424 3425 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3426 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3427 NETIF_F_HW_VLAN_CTAG_RX | 3428 NETIF_F_HW_VLAN_CTAG_FILTER); 3429 3430 return features; 3431 } 3432 3433 static const struct net_device_ops iavf_netdev_ops = { 3434 .ndo_open = iavf_open, 3435 .ndo_stop = iavf_close, 3436 .ndo_start_xmit = iavf_xmit_frame, 3437 .ndo_set_rx_mode = iavf_set_rx_mode, 3438 .ndo_validate_addr = eth_validate_addr, 3439 .ndo_set_mac_address = iavf_set_mac, 3440 .ndo_change_mtu = iavf_change_mtu, 3441 .ndo_tx_timeout = iavf_tx_timeout, 3442 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3443 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3444 .ndo_features_check = iavf_features_check, 3445 .ndo_fix_features = iavf_fix_features, 3446 .ndo_set_features = iavf_set_features, 3447 .ndo_setup_tc = iavf_setup_tc, 3448 }; 3449 3450 /** 3451 * iavf_check_reset_complete - check that VF reset is complete 3452 * @hw: pointer to hw struct 3453 * 3454 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3455 **/ 3456 static int iavf_check_reset_complete(struct iavf_hw *hw) 3457 { 3458 u32 rstat; 3459 int i; 3460 3461 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3462 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3463 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3464 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3465 (rstat == VIRTCHNL_VFR_COMPLETED)) 3466 return 0; 3467 usleep_range(10, 20); 3468 } 3469 return -EBUSY; 3470 } 3471 3472 /** 3473 * iavf_process_config - Process the config information we got from the PF 3474 * @adapter: board private structure 3475 * 3476 * Verify that we have a valid config struct, and set up our netdev features 3477 * and our VSI struct. 
3478 **/ 3479 int iavf_process_config(struct iavf_adapter *adapter) 3480 { 3481 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3482 int i, num_req_queues = adapter->num_req_queues; 3483 struct net_device *netdev = adapter->netdev; 3484 struct iavf_vsi *vsi = &adapter->vsi; 3485 netdev_features_t hw_enc_features; 3486 netdev_features_t hw_features; 3487 3488 /* got VF config message back from PF, now we can parse it */ 3489 for (i = 0; i < vfres->num_vsis; i++) { 3490 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3491 adapter->vsi_res = &vfres->vsi_res[i]; 3492 } 3493 if (!adapter->vsi_res) { 3494 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3495 return -ENODEV; 3496 } 3497 3498 if (num_req_queues && 3499 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3500 /* Problem. The PF gave us fewer queues than what we had 3501 * negotiated in our request. Need a reset to see if we can't 3502 * get back to a working state. 3503 */ 3504 dev_err(&adapter->pdev->dev, 3505 "Requested %d queues, but PF only gave us %d.\n", 3506 num_req_queues, 3507 adapter->vsi_res->num_queue_pairs); 3508 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3509 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3510 iavf_schedule_reset(adapter); 3511 return -ENODEV; 3512 } 3513 adapter->num_req_queues = 0; 3514 3515 hw_enc_features = NETIF_F_SG | 3516 NETIF_F_IP_CSUM | 3517 NETIF_F_IPV6_CSUM | 3518 NETIF_F_HIGHDMA | 3519 NETIF_F_SOFT_FEATURES | 3520 NETIF_F_TSO | 3521 NETIF_F_TSO_ECN | 3522 NETIF_F_TSO6 | 3523 NETIF_F_SCTP_CRC | 3524 NETIF_F_RXHASH | 3525 NETIF_F_RXCSUM | 3526 0; 3527 3528 /* advertise to stack only if offloads for encapsulated packets is 3529 * supported 3530 */ 3531 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3532 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3533 NETIF_F_GSO_GRE | 3534 NETIF_F_GSO_GRE_CSUM | 3535 NETIF_F_GSO_IPXIP4 | 3536 NETIF_F_GSO_IPXIP6 | 3537 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3538 NETIF_F_GSO_PARTIAL | 3539 0; 3540 3541 if (!(vfres->vf_cap_flags & 3542 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3543 netdev->gso_partial_features |= 3544 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3545 3546 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3547 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3548 netdev->hw_enc_features |= hw_enc_features; 3549 } 3550 /* record features VLANs can make use of */ 3551 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3552 3553 /* Write features and hw_features separately to avoid polluting 3554 * with, or dropping, features that are set when we registered. 3555 */ 3556 hw_features = hw_enc_features; 3557 3558 /* Enable VLAN features if supported */ 3559 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3560 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3561 NETIF_F_HW_VLAN_CTAG_RX); 3562 /* Enable cloud filter if ADQ is supported */ 3563 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3564 hw_features |= NETIF_F_HW_TC; 3565 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3566 hw_features |= NETIF_F_GSO_UDP_L4; 3567 3568 netdev->hw_features |= hw_features; 3569 3570 netdev->features |= hw_features; 3571 3572 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3573 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3574 3575 netdev->priv_flags |= IFF_UNICAST_FLT; 3576 3577 /* Do not turn on offloads when they are requested to be turned off. 3578 * TSO needs minimum 576 bytes to work correctly. 
3579 */ 3580 if (netdev->wanted_features) { 3581 if (!(netdev->wanted_features & NETIF_F_TSO) || 3582 netdev->mtu < 576) 3583 netdev->features &= ~NETIF_F_TSO; 3584 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3585 netdev->mtu < 576) 3586 netdev->features &= ~NETIF_F_TSO6; 3587 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3588 netdev->features &= ~NETIF_F_TSO_ECN; 3589 if (!(netdev->wanted_features & NETIF_F_GRO)) 3590 netdev->features &= ~NETIF_F_GRO; 3591 if (!(netdev->wanted_features & NETIF_F_GSO)) 3592 netdev->features &= ~NETIF_F_GSO; 3593 } 3594 3595 adapter->vsi.id = adapter->vsi_res->vsi_id; 3596 3597 adapter->vsi.back = adapter; 3598 adapter->vsi.base_vector = 1; 3599 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3600 vsi->netdev = adapter->netdev; 3601 vsi->qs_handle = adapter->vsi_res->qset_handle; 3602 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3603 adapter->rss_key_size = vfres->rss_key_size; 3604 adapter->rss_lut_size = vfres->rss_lut_size; 3605 } else { 3606 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3607 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3608 } 3609 3610 return 0; 3611 } 3612 3613 /** 3614 * iavf_init_task - worker thread to perform delayed initialization 3615 * @work: pointer to work_struct containing our data 3616 * 3617 * This task completes the work that was begun in probe. Due to the nature 3618 * of VF-PF communications, we may need to wait tens of milliseconds to get 3619 * responses back from the PF. Rather than busy-wait in probe and bog down the 3620 * whole system, we'll do it in a task so we can sleep. 3621 * This task only runs during driver init. Once we've established 3622 * communications with the PF driver and set up our netdev, the watchdog 3623 * takes over. 3624 **/ 3625 static void iavf_init_task(struct work_struct *work) 3626 { 3627 struct iavf_adapter *adapter = container_of(work, 3628 struct iavf_adapter, 3629 init_task.work); 3630 struct iavf_hw *hw = &adapter->hw; 3631 3632 switch (adapter->state) { 3633 case __IAVF_STARTUP: 3634 if (iavf_startup(adapter) < 0) 3635 goto init_failed; 3636 break; 3637 case __IAVF_INIT_VERSION_CHECK: 3638 if (iavf_init_version_check(adapter) < 0) 3639 goto init_failed; 3640 break; 3641 case __IAVF_INIT_GET_RESOURCES: 3642 if (iavf_init_get_resources(adapter) < 0) 3643 goto init_failed; 3644 return; 3645 default: 3646 goto init_failed; 3647 } 3648 3649 queue_delayed_work(iavf_wq, &adapter->init_task, 3650 msecs_to_jiffies(30)); 3651 return; 3652 init_failed: 3653 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3654 dev_err(&adapter->pdev->dev, 3655 "Failed to communicate with PF; waiting before retry\n"); 3656 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3657 iavf_shutdown_adminq(hw); 3658 adapter->state = __IAVF_STARTUP; 3659 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3660 return; 3661 } 3662 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3663 } 3664 3665 /** 3666 * iavf_shutdown - Shutdown the device in preparation for a reboot 3667 * @pdev: pci device structure 3668 **/ 3669 static void iavf_shutdown(struct pci_dev *pdev) 3670 { 3671 struct net_device *netdev = pci_get_drvdata(pdev); 3672 struct iavf_adapter *adapter = netdev_priv(netdev); 3673 3674 netif_device_detach(netdev); 3675 3676 if (netif_running(netdev)) 3677 iavf_close(netdev); 3678 3679 /* Prevent the watchdog from running. 
*/ 3680 adapter->state = __IAVF_REMOVE; 3681 adapter->aq_required = 0; 3682 3683 #ifdef CONFIG_PM 3684 pci_save_state(pdev); 3685 3686 #endif 3687 pci_disable_device(pdev); 3688 } 3689 3690 /** 3691 * iavf_probe - Device Initialization Routine 3692 * @pdev: PCI device information struct 3693 * @ent: entry in iavf_pci_tbl 3694 * 3695 * Returns 0 on success, negative on failure 3696 * 3697 * iavf_probe initializes an adapter identified by a pci_dev structure. 3698 * The OS initialization, configuring of the adapter private structure, 3699 * and a hardware reset occur. 3700 **/ 3701 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3702 { 3703 struct net_device *netdev; 3704 struct iavf_adapter *adapter = NULL; 3705 struct iavf_hw *hw = NULL; 3706 int err; 3707 3708 err = pci_enable_device(pdev); 3709 if (err) 3710 return err; 3711 3712 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3713 if (err) { 3714 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3715 if (err) { 3716 dev_err(&pdev->dev, 3717 "DMA configuration failed: 0x%x\n", err); 3718 goto err_dma; 3719 } 3720 } 3721 3722 err = pci_request_regions(pdev, iavf_driver_name); 3723 if (err) { 3724 dev_err(&pdev->dev, 3725 "pci_request_regions failed 0x%x\n", err); 3726 goto err_pci_reg; 3727 } 3728 3729 pci_enable_pcie_error_reporting(pdev); 3730 3731 pci_set_master(pdev); 3732 3733 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3734 IAVF_MAX_REQ_QUEUES); 3735 if (!netdev) { 3736 err = -ENOMEM; 3737 goto err_alloc_etherdev; 3738 } 3739 3740 SET_NETDEV_DEV(netdev, &pdev->dev); 3741 3742 pci_set_drvdata(pdev, netdev); 3743 adapter = netdev_priv(netdev); 3744 3745 adapter->netdev = netdev; 3746 adapter->pdev = pdev; 3747 3748 hw = &adapter->hw; 3749 hw->back = adapter; 3750 3751 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3752 adapter->state = __IAVF_STARTUP; 3753 3754 /* Call save state here because it relies on the adapter struct. 

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;
}
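
/* iavf_resume() below undoes this teardown: it re-enables bus mastering,
 * reallocates the MSI-X vectors and the misc (AdminQ) interrupt, and then
 * schedules reset_task to rebuild the rings and re-negotiate resources with
 * the PF rather than restoring them inline.  Both callbacks are wired into
 * iavf_pm_ops via SIMPLE_DEV_PM_OPS() at the bottom of this file, so they
 * are only referenced when CONFIG_PM_SLEEP is set, hence __maybe_unused.
 */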

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}
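
/* Removal below is roughly the reverse of probe and init: cancel the
 * outstanding work items, unregister the netdev, ask the PF for a VF reset,
 * free interrupts, queues and RSS state, shut down the AdminQ and destroy
 * the mutexes created in iavf_probe(), then empty the remaining filter
 * lists before releasing the netdev and the PCI resources.
 */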

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	/* Don't leak the workqueue if PCI registration fails */
	if (ret)
		destroy_workqueue(iavf_wq);

	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */