// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f)
		ether_addr_copy(hw->mac.addr, addr->sa_data);

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
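 *
 * Sets the adapter state to __IAVF_RUNNING, enables NAPI on all queue
 * vectors, and asks the PF to enable the queues via the next watchdog run.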
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_fdir_fltr *fdir;
	struct iavf_mac_filter *f;
	struct iavf_adv_rss *rss;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
		rss->state = IAVF_ADV_RSS_DEL_REQUEST;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}

/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	return -EAGAIN;
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure it returns -EAGAIN.
 **/
static int iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	adapter->state = __IAVF_INIT_VERSION_CHECK;
err:
	return err;
}

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
 * on failure it returns -EAGAIN.
 **/
static int iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	adapter->state = __IAVF_INIT_GET_RESOURCES;

err:
	return err;
}

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes the driver initialization procedure.
 * On success the state is changed to __IAVF_DOWN;
 * on failure it returns -EAGAIN.
 **/
static int iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return 0;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_process_config(adapter);
	if (err)
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __IAVF_DOWN;
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

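	/* Allocate RSS key and LUT buffers before the initial RSS
	 * configuration below.
	 */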
	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	return err;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	return err;
}

/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task.work);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		adapter->state = __IAVF_COMM_FAILED;

	switch (adapter->state) {
	case __IAVF_COMM_FAILED:
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			queue_delayed_work(iavf_wq, &adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
1942 */ 1943 return; 1944 } 1945 adapter->aq_required = 0; 1946 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1947 clear_bit(__IAVF_IN_CRITICAL_TASK, 1948 &adapter->crit_section); 1949 queue_delayed_work(iavf_wq, 1950 &adapter->watchdog_task, 1951 msecs_to_jiffies(10)); 1952 goto watchdog_done; 1953 case __IAVF_RESETTING: 1954 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1955 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1956 return; 1957 case __IAVF_DOWN: 1958 case __IAVF_DOWN_PENDING: 1959 case __IAVF_TESTING: 1960 case __IAVF_RUNNING: 1961 if (adapter->current_op) { 1962 if (!iavf_asq_done(hw)) { 1963 dev_dbg(&adapter->pdev->dev, 1964 "Admin queue timeout\n"); 1965 iavf_send_api_ver(adapter); 1966 } 1967 } else { 1968 /* An error will be returned if no commands were 1969 * processed; use this opportunity to update stats 1970 */ 1971 if (iavf_process_aq_command(adapter) && 1972 adapter->state == __IAVF_RUNNING) 1973 iavf_request_stats(adapter); 1974 } 1975 break; 1976 case __IAVF_REMOVE: 1977 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1978 return; 1979 default: 1980 goto restart_watchdog; 1981 } 1982 1983 /* check for hw reset */ 1984 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 1985 if (!reg_val) { 1986 adapter->state = __IAVF_RESETTING; 1987 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1988 adapter->aq_required = 0; 1989 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1990 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1991 queue_work(iavf_wq, &adapter->reset_task); 1992 goto watchdog_done; 1993 } 1994 1995 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 1996 watchdog_done: 1997 if (adapter->state == __IAVF_RUNNING || 1998 adapter->state == __IAVF_COMM_FAILED) 1999 iavf_detect_recover_hung(&adapter->vsi); 2000 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2001 restart_watchdog: 2002 if (adapter->aq_required) 2003 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2004 msecs_to_jiffies(20)); 2005 else 2006 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2007 queue_work(iavf_wq, &adapter->adminq_task); 2008 } 2009 2010 static void iavf_disable_vf(struct iavf_adapter *adapter) 2011 { 2012 struct iavf_mac_filter *f, *ftmp; 2013 struct iavf_vlan_filter *fv, *fvtmp; 2014 struct iavf_cloud_filter *cf, *cftmp; 2015 2016 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2017 2018 /* We don't use netif_running() because it may be true prior to 2019 * ndo_open() returning, so we can't assume it means all our open 2020 * tasks have finished, since we're not holding the rtnl_lock here. 
2021 */ 2022 if (adapter->state == __IAVF_RUNNING) { 2023 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2024 netif_carrier_off(adapter->netdev); 2025 netif_tx_disable(adapter->netdev); 2026 adapter->link_up = false; 2027 iavf_napi_disable_all(adapter); 2028 iavf_irq_disable(adapter); 2029 iavf_free_traffic_irqs(adapter); 2030 iavf_free_all_tx_resources(adapter); 2031 iavf_free_all_rx_resources(adapter); 2032 } 2033 2034 spin_lock_bh(&adapter->mac_vlan_list_lock); 2035 2036 /* Delete all of the filters */ 2037 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2038 list_del(&f->list); 2039 kfree(f); 2040 } 2041 2042 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2043 list_del(&fv->list); 2044 kfree(fv); 2045 } 2046 2047 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2048 2049 spin_lock_bh(&adapter->cloud_filter_list_lock); 2050 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2051 list_del(&cf->list); 2052 kfree(cf); 2053 adapter->num_cloud_filters--; 2054 } 2055 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2056 2057 iavf_free_misc_irq(adapter); 2058 iavf_reset_interrupt_capability(adapter); 2059 iavf_free_queues(adapter); 2060 iavf_free_q_vectors(adapter); 2061 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2062 iavf_shutdown_adminq(&adapter->hw); 2063 adapter->netdev->flags &= ~IFF_UP; 2064 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2065 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2066 adapter->state = __IAVF_DOWN; 2067 wake_up(&adapter->down_waitqueue); 2068 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2069 } 2070 2071 /** 2072 * iavf_reset_task - Call-back task to handle hardware reset 2073 * @work: pointer to work_struct 2074 * 2075 * During reset we need to shut down and reinitialize the admin queue 2076 * before we can use it to communicate with the PF again. We also clear 2077 * and reinit the rings because that context is lost as well. 2078 **/ 2079 static void iavf_reset_task(struct work_struct *work) 2080 { 2081 struct iavf_adapter *adapter = container_of(work, 2082 struct iavf_adapter, 2083 reset_task); 2084 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2085 struct net_device *netdev = adapter->netdev; 2086 struct iavf_hw *hw = &adapter->hw; 2087 struct iavf_mac_filter *f, *ftmp; 2088 struct iavf_vlan_filter *vlf; 2089 struct iavf_cloud_filter *cf; 2090 u32 reg_val; 2091 int i = 0, err; 2092 bool running; 2093 2094 /* When device is being removed it doesn't make sense to run the reset 2095 * task, just return in such a case. 2096 */ 2097 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2098 return; 2099 2100 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2101 &adapter->crit_section)) 2102 usleep_range(500, 1000); 2103 if (CLIENT_ENABLED(adapter)) { 2104 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2105 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2106 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2107 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2108 cancel_delayed_work_sync(&adapter->client_task); 2109 iavf_notify_client_close(&adapter->vsi, true); 2110 } 2111 iavf_misc_irq_disable(adapter); 2112 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2113 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2114 /* Restart the AQ here. If we have been reset but didn't 2115 * detect it, or if the PF had to reinit, our AQ will be hosed. 
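	 * Shutting it down and bringing it back up puts it into a known
	 * state before we explicitly ask the PF for a fresh reset below.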
2116 */ 2117 iavf_shutdown_adminq(hw); 2118 iavf_init_adminq(hw); 2119 iavf_request_reset(adapter); 2120 } 2121 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2122 2123 /* poll until we see the reset actually happen */ 2124 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2125 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2126 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2127 if (!reg_val) 2128 break; 2129 usleep_range(5000, 10000); 2130 } 2131 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2132 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2133 goto continue_reset; /* act like the reset happened */ 2134 } 2135 2136 /* wait until the reset is complete and the PF is responding to us */ 2137 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2138 /* sleep first to make sure a minimum wait time is met */ 2139 msleep(IAVF_RESET_WAIT_MS); 2140 2141 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2142 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2143 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2144 break; 2145 } 2146 2147 pci_set_master(adapter->pdev); 2148 2149 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2150 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2151 reg_val); 2152 iavf_disable_vf(adapter); 2153 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2154 return; /* Do not attempt to reinit. It's dead, Jim. */ 2155 } 2156 2157 continue_reset: 2158 /* We don't use netif_running() because it may be true prior to 2159 * ndo_open() returning, so we can't assume it means all our open 2160 * tasks have finished, since we're not holding the rtnl_lock here. 2161 */ 2162 running = ((adapter->state == __IAVF_RUNNING) || 2163 (adapter->state == __IAVF_RESETTING)); 2164 2165 if (running) { 2166 netif_carrier_off(netdev); 2167 netif_tx_stop_all_queues(netdev); 2168 adapter->link_up = false; 2169 iavf_napi_disable_all(adapter); 2170 } 2171 iavf_irq_disable(adapter); 2172 2173 adapter->state = __IAVF_RESETTING; 2174 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2175 2176 /* free the Tx/Rx rings and descriptors, might be better to just 2177 * re-use them sometime in the future 2178 */ 2179 iavf_free_all_rx_resources(adapter); 2180 iavf_free_all_tx_resources(adapter); 2181 2182 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2183 /* kill and reinit the admin queue */ 2184 iavf_shutdown_adminq(hw); 2185 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2186 err = iavf_init_adminq(hw); 2187 if (err) 2188 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2189 err); 2190 adapter->aq_required = 0; 2191 2192 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2193 err = iavf_reinit_interrupt_scheme(adapter); 2194 if (err) 2195 goto reset_err; 2196 } 2197 2198 if (RSS_AQ(adapter)) { 2199 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2200 } else { 2201 err = iavf_init_rss(adapter); 2202 if (err) 2203 goto reset_err; 2204 } 2205 2206 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2207 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2208 2209 spin_lock_bh(&adapter->mac_vlan_list_lock); 2210 2211 /* Delete filter for the current MAC address, it could have 2212 * been changed by the PF via administratively set MAC. 2213 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2214 */ 2215 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2216 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2217 list_del(&f->list); 2218 kfree(f); 2219 } 2220 } 2221 /* re-add all MAC filters */ 2222 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2223 f->add = true; 2224 } 2225 /* re-add all VLAN filters */ 2226 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2227 vlf->add = true; 2228 } 2229 2230 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2231 2232 /* check if TCs are running and re-add all cloud filters */ 2233 spin_lock_bh(&adapter->cloud_filter_list_lock); 2234 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2235 adapter->num_tc) { 2236 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2237 cf->add = true; 2238 } 2239 } 2240 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2241 2242 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2243 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2244 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2245 iavf_misc_irq_enable(adapter); 2246 2247 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2248 2249 /* We were running when the reset started, so we need to restore some 2250 * state here. 2251 */ 2252 if (running) { 2253 /* allocate transmit descriptors */ 2254 err = iavf_setup_all_tx_resources(adapter); 2255 if (err) 2256 goto reset_err; 2257 2258 /* allocate receive descriptors */ 2259 err = iavf_setup_all_rx_resources(adapter); 2260 if (err) 2261 goto reset_err; 2262 2263 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2264 err = iavf_request_traffic_irqs(adapter, netdev->name); 2265 if (err) 2266 goto reset_err; 2267 2268 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2269 } 2270 2271 iavf_configure(adapter); 2272 2273 iavf_up_complete(adapter); 2274 2275 iavf_irq_enable(adapter, true); 2276 } else { 2277 adapter->state = __IAVF_DOWN; 2278 wake_up(&adapter->down_waitqueue); 2279 } 2280 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2281 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2282 2283 return; 2284 reset_err: 2285 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2286 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2287 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2288 iavf_close(netdev); 2289 } 2290 2291 /** 2292 * iavf_adminq_task - worker thread to clean the admin queue 2293 * @work: pointer to work_struct containing our data 2294 **/ 2295 static void iavf_adminq_task(struct work_struct *work) 2296 { 2297 struct iavf_adapter *adapter = 2298 container_of(work, struct iavf_adapter, adminq_task); 2299 struct iavf_hw *hw = &adapter->hw; 2300 struct iavf_arq_event_info event; 2301 enum virtchnl_ops v_op; 2302 enum iavf_status ret, v_ret; 2303 u32 val, oldval; 2304 u16 pending; 2305 2306 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2307 goto out; 2308 2309 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2310 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2311 if (!event.msg_buf) 2312 goto out; 2313 2314 do { 2315 ret = iavf_clean_arq_element(hw, &event, &pending); 2316 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2317 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2318 2319 if (ret || !v_op) 2320 break; /* No event to process or error cleaning ARQ */ 2321 2322 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2323 event.msg_len); 2324 if (pending != 0) 2325 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2326 } 
while (pending); 2327 2328 if ((adapter->flags & 2329 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2330 adapter->state == __IAVF_RESETTING) 2331 goto freedom; 2332 2333 /* check for error indications */ 2334 val = rd32(hw, hw->aq.arq.len); 2335 if (val == 0xdeadbeef) /* indicates device in reset */ 2336 goto freedom; 2337 oldval = val; 2338 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2339 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2340 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2341 } 2342 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2343 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2344 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2345 } 2346 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2347 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2348 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2349 } 2350 if (oldval != val) 2351 wr32(hw, hw->aq.arq.len, val); 2352 2353 val = rd32(hw, hw->aq.asq.len); 2354 oldval = val; 2355 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2356 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2357 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2358 } 2359 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2360 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2361 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2362 } 2363 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2364 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2365 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2366 } 2367 if (oldval != val) 2368 wr32(hw, hw->aq.asq.len, val); 2369 2370 freedom: 2371 kfree(event.msg_buf); 2372 out: 2373 /* re-enable Admin queue interrupt cause */ 2374 iavf_misc_irq_enable(adapter); 2375 } 2376 2377 /** 2378 * iavf_client_task - worker thread to perform client work 2379 * @work: pointer to work_struct containing our data 2380 * 2381 * This task handles client interactions. Because client calls can be 2382 * reentrant, we can't handle them in the watchdog. 2383 **/ 2384 static void iavf_client_task(struct work_struct *work) 2385 { 2386 struct iavf_adapter *adapter = 2387 container_of(work, struct iavf_adapter, client_task.work); 2388 2389 /* If we can't get the client bit, just give up. We'll be rescheduled 2390 * later. 
2391 */ 2392 2393 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) 2394 return; 2395 2396 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2397 iavf_client_subtask(adapter); 2398 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2399 goto out; 2400 } 2401 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2402 iavf_notify_client_l2_params(&adapter->vsi); 2403 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2404 goto out; 2405 } 2406 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2407 iavf_notify_client_close(&adapter->vsi, false); 2408 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2409 goto out; 2410 } 2411 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2412 iavf_notify_client_open(&adapter->vsi); 2413 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2414 } 2415 out: 2416 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2417 } 2418 2419 /** 2420 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2421 * @adapter: board private structure 2422 * 2423 * Free all transmit software resources 2424 **/ 2425 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2426 { 2427 int i; 2428 2429 if (!adapter->tx_rings) 2430 return; 2431 2432 for (i = 0; i < adapter->num_active_queues; i++) 2433 if (adapter->tx_rings[i].desc) 2434 iavf_free_tx_resources(&adapter->tx_rings[i]); 2435 } 2436 2437 /** 2438 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2439 * @adapter: board private structure 2440 * 2441 * If this function returns with an error, then it's possible one or 2442 * more of the rings is populated (while the rest are not). It is the 2443 * callers duty to clean those orphaned rings. 2444 * 2445 * Return 0 on success, negative on failure 2446 **/ 2447 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2448 { 2449 int i, err = 0; 2450 2451 for (i = 0; i < adapter->num_active_queues; i++) { 2452 adapter->tx_rings[i].count = adapter->tx_desc_count; 2453 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2454 if (!err) 2455 continue; 2456 dev_err(&adapter->pdev->dev, 2457 "Allocation for Tx Queue %u failed\n", i); 2458 break; 2459 } 2460 2461 return err; 2462 } 2463 2464 /** 2465 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2466 * @adapter: board private structure 2467 * 2468 * If this function returns with an error, then it's possible one or 2469 * more of the rings is populated (while the rest are not). It is the 2470 * callers duty to clean those orphaned rings. 
2471 * 2472 * Return 0 on success, negative on failure 2473 **/ 2474 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2475 { 2476 int i, err = 0; 2477 2478 for (i = 0; i < adapter->num_active_queues; i++) { 2479 adapter->rx_rings[i].count = adapter->rx_desc_count; 2480 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2481 if (!err) 2482 continue; 2483 dev_err(&adapter->pdev->dev, 2484 "Allocation for Rx Queue %u failed\n", i); 2485 break; 2486 } 2487 return err; 2488 } 2489 2490 /** 2491 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2492 * @adapter: board private structure 2493 * 2494 * Free all receive software resources 2495 **/ 2496 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2497 { 2498 int i; 2499 2500 if (!adapter->rx_rings) 2501 return; 2502 2503 for (i = 0; i < adapter->num_active_queues; i++) 2504 if (adapter->rx_rings[i].desc) 2505 iavf_free_rx_resources(&adapter->rx_rings[i]); 2506 } 2507 2508 /** 2509 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2510 * @adapter: board private structure 2511 * @max_tx_rate: max Tx bw for a tc 2512 **/ 2513 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2514 u64 max_tx_rate) 2515 { 2516 int speed = 0, ret = 0; 2517 2518 if (ADV_LINK_SUPPORT(adapter)) { 2519 if (adapter->link_speed_mbps < U32_MAX) { 2520 speed = adapter->link_speed_mbps; 2521 goto validate_bw; 2522 } else { 2523 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2524 return -EINVAL; 2525 } 2526 } 2527 2528 switch (adapter->link_speed) { 2529 case VIRTCHNL_LINK_SPEED_40GB: 2530 speed = SPEED_40000; 2531 break; 2532 case VIRTCHNL_LINK_SPEED_25GB: 2533 speed = SPEED_25000; 2534 break; 2535 case VIRTCHNL_LINK_SPEED_20GB: 2536 speed = SPEED_20000; 2537 break; 2538 case VIRTCHNL_LINK_SPEED_10GB: 2539 speed = SPEED_10000; 2540 break; 2541 case VIRTCHNL_LINK_SPEED_5GB: 2542 speed = SPEED_5000; 2543 break; 2544 case VIRTCHNL_LINK_SPEED_2_5GB: 2545 speed = SPEED_2500; 2546 break; 2547 case VIRTCHNL_LINK_SPEED_1GB: 2548 speed = SPEED_1000; 2549 break; 2550 case VIRTCHNL_LINK_SPEED_100MB: 2551 speed = SPEED_100; 2552 break; 2553 default: 2554 break; 2555 } 2556 2557 validate_bw: 2558 if (max_tx_rate > speed) { 2559 dev_err(&adapter->pdev->dev, 2560 "Invalid tx rate specified\n"); 2561 ret = -EINVAL; 2562 } 2563 2564 return ret; 2565 } 2566 2567 /** 2568 * iavf_validate_ch_config - validate queue mapping info 2569 * @adapter: board private structure 2570 * @mqprio_qopt: queue parameters 2571 * 2572 * This function validates if the config provided by the user to 2573 * configure queue channels is valid or not. Returns 0 on a valid 2574 * config. 
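 * A config is rejected when it requests more than IAVF_MAX_TRAFFIC_CLASS
 * classes, uses non-contiguous queue offsets, sets a minimum rate, asks
 * for more than IAVF_MAX_REQ_QUEUES queue pairs in total, or asks for
 * more aggregate max bandwidth than the current link speed provides.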
2575 **/ 2576 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2577 struct tc_mqprio_qopt_offload *mqprio_qopt) 2578 { 2579 u64 total_max_rate = 0; 2580 int i, num_qps = 0; 2581 u64 tx_rate = 0; 2582 int ret = 0; 2583 2584 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2585 mqprio_qopt->qopt.num_tc < 1) 2586 return -EINVAL; 2587 2588 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2589 if (!mqprio_qopt->qopt.count[i] || 2590 mqprio_qopt->qopt.offset[i] != num_qps) 2591 return -EINVAL; 2592 if (mqprio_qopt->min_rate[i]) { 2593 dev_err(&adapter->pdev->dev, 2594 "Invalid min tx rate (greater than 0) specified\n"); 2595 return -EINVAL; 2596 } 2597 /*convert to Mbps */ 2598 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2599 IAVF_MBPS_DIVISOR); 2600 total_max_rate += tx_rate; 2601 num_qps += mqprio_qopt->qopt.count[i]; 2602 } 2603 if (num_qps > IAVF_MAX_REQ_QUEUES) 2604 return -EINVAL; 2605 2606 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2607 return ret; 2608 } 2609 2610 /** 2611 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2612 * @adapter: board private structure 2613 **/ 2614 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2615 { 2616 struct iavf_cloud_filter *cf, *cftmp; 2617 2618 spin_lock_bh(&adapter->cloud_filter_list_lock); 2619 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2620 list) { 2621 list_del(&cf->list); 2622 kfree(cf); 2623 adapter->num_cloud_filters--; 2624 } 2625 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2626 } 2627 2628 /** 2629 * __iavf_setup_tc - configure multiple traffic classes 2630 * @netdev: network interface device structure 2631 * @type_data: tc offload data 2632 * 2633 * This function processes the config information provided by the 2634 * user to configure traffic classes/queue channels and packages the 2635 * information to request the PF to setup traffic classes. 2636 * 2637 * Returns 0 on success. 
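 *
 * Example from user space (illustrative only; the interface name, queue
 * counts and destination port are arbitrary, and the PF must advertise
 * VIRTCHNL_VF_OFFLOAD_ADQ):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *	queues 2@0 2@2 hw 1 mode channel
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower ip_proto tcp \
 *	dst_port 80 skip_sw hw_tc 1
 *
 * The mqprio command is what lands here; the flower filter is handled by
 * iavf_configure_clsflower() and steers matching TCP traffic to TC1
 * (note that a destination port is required for any class other than
 * TC0, see iavf_handle_tclass()).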
2638 **/ 2639 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2640 { 2641 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2642 struct iavf_adapter *adapter = netdev_priv(netdev); 2643 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2644 u8 num_tc = 0, total_qps = 0; 2645 int ret = 0, netdev_tc = 0; 2646 u64 max_tx_rate; 2647 u16 mode; 2648 int i; 2649 2650 num_tc = mqprio_qopt->qopt.num_tc; 2651 mode = mqprio_qopt->mode; 2652 2653 /* delete queue_channel */ 2654 if (!mqprio_qopt->qopt.hw) { 2655 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2656 /* reset the tc configuration */ 2657 netdev_reset_tc(netdev); 2658 adapter->num_tc = 0; 2659 netif_tx_stop_all_queues(netdev); 2660 netif_tx_disable(netdev); 2661 iavf_del_all_cloud_filters(adapter); 2662 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2663 goto exit; 2664 } else { 2665 return -EINVAL; 2666 } 2667 } 2668 2669 /* add queue channel */ 2670 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2671 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2672 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2673 return -EOPNOTSUPP; 2674 } 2675 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2676 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2677 return -EINVAL; 2678 } 2679 2680 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2681 if (ret) 2682 return ret; 2683 /* Return if same TC config is requested */ 2684 if (adapter->num_tc == num_tc) 2685 return 0; 2686 adapter->num_tc = num_tc; 2687 2688 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2689 if (i < num_tc) { 2690 adapter->ch_config.ch_info[i].count = 2691 mqprio_qopt->qopt.count[i]; 2692 adapter->ch_config.ch_info[i].offset = 2693 mqprio_qopt->qopt.offset[i]; 2694 total_qps += mqprio_qopt->qopt.count[i]; 2695 max_tx_rate = mqprio_qopt->max_rate[i]; 2696 /* convert to Mbps */ 2697 max_tx_rate = div_u64(max_tx_rate, 2698 IAVF_MBPS_DIVISOR); 2699 adapter->ch_config.ch_info[i].max_tx_rate = 2700 max_tx_rate; 2701 } else { 2702 adapter->ch_config.ch_info[i].count = 1; 2703 adapter->ch_config.ch_info[i].offset = 0; 2704 } 2705 } 2706 adapter->ch_config.total_qps = total_qps; 2707 netif_tx_stop_all_queues(netdev); 2708 netif_tx_disable(netdev); 2709 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2710 netdev_reset_tc(netdev); 2711 /* Report the tc mapping up the stack */ 2712 netdev_set_num_tc(adapter->netdev, num_tc); 2713 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2714 u16 qcount = mqprio_qopt->qopt.count[i]; 2715 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2716 2717 if (i < num_tc) 2718 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2719 qoffset); 2720 } 2721 } 2722 exit: 2723 return ret; 2724 } 2725 2726 /** 2727 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2728 * @adapter: board private structure 2729 * @f: pointer to struct flow_cls_offload 2730 * @filter: pointer to cloud filter structure 2731 */ 2732 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2733 struct flow_cls_offload *f, 2734 struct iavf_cloud_filter *filter) 2735 { 2736 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2737 struct flow_dissector *dissector = rule->match.dissector; 2738 u16 n_proto_mask = 0; 2739 u16 n_proto_key = 0; 2740 u8 field_flags = 0; 2741 u16 addr_type = 0; 2742 u16 n_proto = 0; 2743 int i = 0; 2744 struct virtchnl_filter *vf = &filter->f; 2745 2746 if (dissector->used_keys & 2747 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2748 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2749 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2750 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2751 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2752 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2753 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2754 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2755 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2756 dissector->used_keys); 2757 return -EOPNOTSUPP; 2758 } 2759 2760 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2761 struct flow_match_enc_keyid match; 2762 2763 flow_rule_match_enc_keyid(rule, &match); 2764 if (match.mask->keyid != 0) 2765 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2766 } 2767 2768 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2769 struct flow_match_basic match; 2770 2771 flow_rule_match_basic(rule, &match); 2772 n_proto_key = ntohs(match.key->n_proto); 2773 n_proto_mask = ntohs(match.mask->n_proto); 2774 2775 if (n_proto_key == ETH_P_ALL) { 2776 n_proto_key = 0; 2777 n_proto_mask = 0; 2778 } 2779 n_proto = n_proto_key & n_proto_mask; 2780 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2781 return -EINVAL; 2782 if (n_proto == ETH_P_IPV6) { 2783 /* specify flow type as TCP IPv6 */ 2784 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2785 } 2786 2787 if (match.key->ip_proto != IPPROTO_TCP) { 2788 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2789 return -EINVAL; 2790 } 2791 } 2792 2793 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2794 struct flow_match_eth_addrs match; 2795 2796 flow_rule_match_eth_addrs(rule, &match); 2797 2798 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2799 if (!is_zero_ether_addr(match.mask->dst)) { 2800 if (is_broadcast_ether_addr(match.mask->dst)) { 2801 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2802 } else { 2803 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2804 match.mask->dst); 2805 return IAVF_ERR_CONFIG; 2806 } 2807 } 2808 2809 if (!is_zero_ether_addr(match.mask->src)) { 2810 if (is_broadcast_ether_addr(match.mask->src)) { 2811 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2812 } else { 2813 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2814 match.mask->src); 2815 return IAVF_ERR_CONFIG; 2816 } 2817 } 2818 2819 if (!is_zero_ether_addr(match.key->dst)) 2820 if (is_valid_ether_addr(match.key->dst) || 2821 is_multicast_ether_addr(match.key->dst)) { 2822 /* set the mask if a valid dst_mac address */ 2823 for (i = 0; i < ETH_ALEN; i++) 2824 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2825 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2826 match.key->dst); 2827 } 2828 2829 if (!is_zero_ether_addr(match.key->src)) 2830 if (is_valid_ether_addr(match.key->src) || 2831 is_multicast_ether_addr(match.key->src)) { 2832 /* set the mask if a valid dst_mac address */ 2833 for (i = 0; i < ETH_ALEN; i++) 2834 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2835 ether_addr_copy(vf->data.tcp_spec.src_mac, 2836 match.key->src); 2837 } 2838 } 2839 2840 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2841 struct flow_match_vlan match; 2842 2843 flow_rule_match_vlan(rule, &match); 2844 if (match.mask->vlan_id) { 2845 if (match.mask->vlan_id == VLAN_VID_MASK) { 2846 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2847 } else { 2848 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2849 match.mask->vlan_id); 2850 return IAVF_ERR_CONFIG; 2851 } 2852 } 2853 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2854 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2855 } 2856 2857 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2858 struct flow_match_control match; 2859 2860 
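		/* The control key only tells us whether the rule matches on
		 * IPv4 or IPv6 addresses; addr_type selects which of the
		 * address blocks below fills in the tcp_spec.
		 */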
		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return IAVF_ERR_CONFIG;
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
		}
		if (match.key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = match.key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&match.mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return IAVF_ERR_CONFIG;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return IAVF_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = match.key->dst;
		}

		if (match.key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = match.key->src;
		}
	}
	vf->field_flags = field_flags;

return 0; 2972 } 2973 2974 /** 2975 * iavf_handle_tclass - Forward to a traffic class on the device 2976 * @adapter: board private structure 2977 * @tc: traffic class index on the device 2978 * @filter: pointer to cloud filter structure 2979 */ 2980 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 2981 struct iavf_cloud_filter *filter) 2982 { 2983 if (tc == 0) 2984 return 0; 2985 if (tc < adapter->num_tc) { 2986 if (!filter->f.data.tcp_spec.dst_port) { 2987 dev_err(&adapter->pdev->dev, 2988 "Specify destination port to redirect to traffic class other than TC0\n"); 2989 return -EINVAL; 2990 } 2991 } 2992 /* redirect to a traffic class on the same device */ 2993 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2994 filter->f.action_meta = tc; 2995 return 0; 2996 } 2997 2998 /** 2999 * iavf_configure_clsflower - Add tc flower filters 3000 * @adapter: board private structure 3001 * @cls_flower: Pointer to struct flow_cls_offload 3002 */ 3003 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3004 struct flow_cls_offload *cls_flower) 3005 { 3006 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3007 struct iavf_cloud_filter *filter = NULL; 3008 int err = -EINVAL, count = 50; 3009 3010 if (tc < 0) { 3011 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3012 return -EINVAL; 3013 } 3014 3015 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3016 if (!filter) 3017 return -ENOMEM; 3018 3019 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3020 &adapter->crit_section)) { 3021 if (--count == 0) 3022 goto err; 3023 udelay(1); 3024 } 3025 3026 filter->cookie = cls_flower->cookie; 3027 3028 /* set the mask to all zeroes to begin with */ 3029 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3030 /* start out with flow type and eth type IPv4 to begin with */ 3031 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3032 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3033 if (err < 0) 3034 goto err; 3035 3036 err = iavf_handle_tclass(adapter, tc, filter); 3037 if (err < 0) 3038 goto err; 3039 3040 /* add filter to the list */ 3041 spin_lock_bh(&adapter->cloud_filter_list_lock); 3042 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3043 adapter->num_cloud_filters++; 3044 filter->add = true; 3045 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3046 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3047 err: 3048 if (err) 3049 kfree(filter); 3050 3051 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3052 return err; 3053 } 3054 3055 /* iavf_find_cf - Find the cloud filter in the list 3056 * @adapter: Board private structure 3057 * @cookie: filter specific cookie 3058 * 3059 * Returns ptr to the filter object or NULL. Must be called while holding the 3060 * cloud_filter_list_lock. 
3061 */ 3062 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3063 unsigned long *cookie) 3064 { 3065 struct iavf_cloud_filter *filter = NULL; 3066 3067 if (!cookie) 3068 return NULL; 3069 3070 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3071 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3072 return filter; 3073 } 3074 return NULL; 3075 } 3076 3077 /** 3078 * iavf_delete_clsflower - Remove tc flower filters 3079 * @adapter: board private structure 3080 * @cls_flower: Pointer to struct flow_cls_offload 3081 */ 3082 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3083 struct flow_cls_offload *cls_flower) 3084 { 3085 struct iavf_cloud_filter *filter = NULL; 3086 int err = 0; 3087 3088 spin_lock_bh(&adapter->cloud_filter_list_lock); 3089 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3090 if (filter) { 3091 filter->del = true; 3092 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3093 } else { 3094 err = -EINVAL; 3095 } 3096 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3097 3098 return err; 3099 } 3100 3101 /** 3102 * iavf_setup_tc_cls_flower - flower classifier offloads 3103 * @adapter: board private structure 3104 * @cls_flower: pointer to flow_cls_offload struct with flow info 3105 */ 3106 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3107 struct flow_cls_offload *cls_flower) 3108 { 3109 switch (cls_flower->command) { 3110 case FLOW_CLS_REPLACE: 3111 return iavf_configure_clsflower(adapter, cls_flower); 3112 case FLOW_CLS_DESTROY: 3113 return iavf_delete_clsflower(adapter, cls_flower); 3114 case FLOW_CLS_STATS: 3115 return -EOPNOTSUPP; 3116 default: 3117 return -EOPNOTSUPP; 3118 } 3119 } 3120 3121 /** 3122 * iavf_setup_tc_block_cb - block callback for tc 3123 * @type: type of offload 3124 * @type_data: offload data 3125 * @cb_priv: 3126 * 3127 * This function is the block callback for traffic classes 3128 **/ 3129 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3130 void *cb_priv) 3131 { 3132 struct iavf_adapter *adapter = cb_priv; 3133 3134 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3135 return -EOPNOTSUPP; 3136 3137 switch (type) { 3138 case TC_SETUP_CLSFLOWER: 3139 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3140 default: 3141 return -EOPNOTSUPP; 3142 } 3143 } 3144 3145 static LIST_HEAD(iavf_block_cb_list); 3146 3147 /** 3148 * iavf_setup_tc - configure multiple traffic classes 3149 * @netdev: network interface device structure 3150 * @type: type of offload 3151 * @type_data: tc offload data 3152 * 3153 * This function is the callback to ndo_setup_tc in the 3154 * netdev_ops. 3155 * 3156 * Returns 0 on success 3157 **/ 3158 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3159 void *type_data) 3160 { 3161 struct iavf_adapter *adapter = netdev_priv(netdev); 3162 3163 switch (type) { 3164 case TC_SETUP_QDISC_MQPRIO: 3165 return __iavf_setup_tc(netdev, type_data); 3166 case TC_SETUP_BLOCK: 3167 return flow_block_cb_setup_simple(type_data, 3168 &iavf_block_cb_list, 3169 iavf_setup_tc_block_cb, 3170 adapter, adapter, true); 3171 default: 3172 return -EOPNOTSUPP; 3173 } 3174 } 3175 3176 /** 3177 * iavf_open - Called when a network interface is made active 3178 * @netdev: network interface device structure 3179 * 3180 * Returns 0 on success, negative value on failure 3181 * 3182 * The open entry point is called when a network interface is made 3183 * active by the system (IFF_UP). 
At this point all resources needed 3184 * for transmit and receive operations are allocated, the interrupt 3185 * handler is registered with the OS, the watchdog is started, 3186 * and the stack is notified that the interface is ready. 3187 **/ 3188 static int iavf_open(struct net_device *netdev) 3189 { 3190 struct iavf_adapter *adapter = netdev_priv(netdev); 3191 int err; 3192 3193 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3194 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3195 return -EIO; 3196 } 3197 3198 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3199 &adapter->crit_section)) 3200 usleep_range(500, 1000); 3201 3202 if (adapter->state != __IAVF_DOWN) { 3203 err = -EBUSY; 3204 goto err_unlock; 3205 } 3206 3207 /* allocate transmit descriptors */ 3208 err = iavf_setup_all_tx_resources(adapter); 3209 if (err) 3210 goto err_setup_tx; 3211 3212 /* allocate receive descriptors */ 3213 err = iavf_setup_all_rx_resources(adapter); 3214 if (err) 3215 goto err_setup_rx; 3216 3217 /* clear any pending interrupts, may auto mask */ 3218 err = iavf_request_traffic_irqs(adapter, netdev->name); 3219 if (err) 3220 goto err_req_irq; 3221 3222 spin_lock_bh(&adapter->mac_vlan_list_lock); 3223 3224 iavf_add_filter(adapter, adapter->hw.mac.addr); 3225 3226 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3227 3228 iavf_configure(adapter); 3229 3230 iavf_up_complete(adapter); 3231 3232 iavf_irq_enable(adapter, true); 3233 3234 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3235 3236 return 0; 3237 3238 err_req_irq: 3239 iavf_down(adapter); 3240 iavf_free_traffic_irqs(adapter); 3241 err_setup_rx: 3242 iavf_free_all_rx_resources(adapter); 3243 err_setup_tx: 3244 iavf_free_all_tx_resources(adapter); 3245 err_unlock: 3246 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3247 3248 return err; 3249 } 3250 3251 /** 3252 * iavf_close - Disables a network interface 3253 * @netdev: network interface device structure 3254 * 3255 * Returns 0, this is not allowed to fail 3256 * 3257 * The close entry point is called when an interface is de-activated 3258 * by the OS. The hardware is still under the drivers control, but 3259 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3260 * are freed, along with all transmit and receive resources. 3261 **/ 3262 static int iavf_close(struct net_device *netdev) 3263 { 3264 struct iavf_adapter *adapter = netdev_priv(netdev); 3265 int status; 3266 3267 if (adapter->state <= __IAVF_DOWN_PENDING) 3268 return 0; 3269 3270 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3271 &adapter->crit_section)) 3272 usleep_range(500, 1000); 3273 3274 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3275 if (CLIENT_ENABLED(adapter)) 3276 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3277 3278 iavf_down(adapter); 3279 adapter->state = __IAVF_DOWN_PENDING; 3280 iavf_free_traffic_irqs(adapter); 3281 3282 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3283 3284 /* We explicitly don't free resources here because the hardware is 3285 * still active and can DMA into memory. Resources are cleared in 3286 * iavf_virtchnl_completion() after we get confirmation from the PF 3287 * driver that the rings have been stopped. 3288 * 3289 * Also, we wait for state to transition to __IAVF_DOWN before 3290 * returning. State change occurs in iavf_virtchnl_completion() after 3291 * VF resources are released (which occurs after PF driver processes and 3292 * responds to admin queue commands). 
3293 */ 3294 3295 status = wait_event_timeout(adapter->down_waitqueue, 3296 adapter->state == __IAVF_DOWN, 3297 msecs_to_jiffies(500)); 3298 if (!status) 3299 netdev_warn(netdev, "Device resources not yet released\n"); 3300 return 0; 3301 } 3302 3303 /** 3304 * iavf_change_mtu - Change the Maximum Transfer Unit 3305 * @netdev: network interface device structure 3306 * @new_mtu: new value for maximum frame size 3307 * 3308 * Returns 0 on success, negative on failure 3309 **/ 3310 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3311 { 3312 struct iavf_adapter *adapter = netdev_priv(netdev); 3313 3314 netdev->mtu = new_mtu; 3315 if (CLIENT_ENABLED(adapter)) { 3316 iavf_notify_client_l2_params(&adapter->vsi); 3317 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3318 } 3319 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3320 queue_work(iavf_wq, &adapter->reset_task); 3321 3322 return 0; 3323 } 3324 3325 /** 3326 * iavf_set_features - set the netdev feature flags 3327 * @netdev: ptr to the netdev being adjusted 3328 * @features: the feature set that the stack is suggesting 3329 * Note: expects to be called while under rtnl_lock() 3330 **/ 3331 static int iavf_set_features(struct net_device *netdev, 3332 netdev_features_t features) 3333 { 3334 struct iavf_adapter *adapter = netdev_priv(netdev); 3335 3336 /* Don't allow changing VLAN_RX flag when adapter is not capable 3337 * of VLAN offload 3338 */ 3339 if (!VLAN_ALLOWED(adapter)) { 3340 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3341 return -EINVAL; 3342 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3343 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3344 adapter->aq_required |= 3345 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3346 else 3347 adapter->aq_required |= 3348 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3349 } 3350 3351 return 0; 3352 } 3353 3354 /** 3355 * iavf_features_check - Validate encapsulated packet conforms to limits 3356 * @skb: skb buff 3357 * @dev: This physical port's netdev 3358 * @features: Offload features that the stack believes apply 3359 **/ 3360 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3361 struct net_device *dev, 3362 netdev_features_t features) 3363 { 3364 size_t len; 3365 3366 /* No point in doing any of this if neither checksum nor GSO are 3367 * being requested for this frame. We can rule out both by just 3368 * checking for CHECKSUM_PARTIAL 3369 */ 3370 if (skb->ip_summed != CHECKSUM_PARTIAL) 3371 return features; 3372 3373 /* We cannot support GSO if the MSS is going to be less than 3374 * 64 bytes. If it is then we need to drop support for GSO. 
3375 */ 3376 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3377 features &= ~NETIF_F_GSO_MASK; 3378 3379 /* MACLEN can support at most 63 words */ 3380 len = skb_network_header(skb) - skb->data; 3381 if (len & ~(63 * 2)) 3382 goto out_err; 3383 3384 /* IPLEN and EIPLEN can support at most 127 dwords */ 3385 len = skb_transport_header(skb) - skb_network_header(skb); 3386 if (len & ~(127 * 4)) 3387 goto out_err; 3388 3389 if (skb->encapsulation) { 3390 /* L4TUNLEN can support 127 words */ 3391 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3392 if (len & ~(127 * 2)) 3393 goto out_err; 3394 3395 /* IPLEN can support at most 127 dwords */ 3396 len = skb_inner_transport_header(skb) - 3397 skb_inner_network_header(skb); 3398 if (len & ~(127 * 4)) 3399 goto out_err; 3400 } 3401 3402 /* No need to validate L4LEN as TCP is the only protocol with a 3403 * a flexible value and we support all possible values supported 3404 * by TCP, which is at most 15 dwords 3405 */ 3406 3407 return features; 3408 out_err: 3409 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3410 } 3411 3412 /** 3413 * iavf_fix_features - fix up the netdev feature bits 3414 * @netdev: our net device 3415 * @features: desired feature bits 3416 * 3417 * Returns fixed-up features bits 3418 **/ 3419 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3420 netdev_features_t features) 3421 { 3422 struct iavf_adapter *adapter = netdev_priv(netdev); 3423 3424 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3425 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3426 NETIF_F_HW_VLAN_CTAG_RX | 3427 NETIF_F_HW_VLAN_CTAG_FILTER); 3428 3429 return features; 3430 } 3431 3432 static const struct net_device_ops iavf_netdev_ops = { 3433 .ndo_open = iavf_open, 3434 .ndo_stop = iavf_close, 3435 .ndo_start_xmit = iavf_xmit_frame, 3436 .ndo_set_rx_mode = iavf_set_rx_mode, 3437 .ndo_validate_addr = eth_validate_addr, 3438 .ndo_set_mac_address = iavf_set_mac, 3439 .ndo_change_mtu = iavf_change_mtu, 3440 .ndo_tx_timeout = iavf_tx_timeout, 3441 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3442 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3443 .ndo_features_check = iavf_features_check, 3444 .ndo_fix_features = iavf_fix_features, 3445 .ndo_set_features = iavf_set_features, 3446 .ndo_setup_tc = iavf_setup_tc, 3447 }; 3448 3449 /** 3450 * iavf_check_reset_complete - check that VF reset is complete 3451 * @hw: pointer to hw struct 3452 * 3453 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3454 **/ 3455 static int iavf_check_reset_complete(struct iavf_hw *hw) 3456 { 3457 u32 rstat; 3458 int i; 3459 3460 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3461 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3462 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3463 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3464 (rstat == VIRTCHNL_VFR_COMPLETED)) 3465 return 0; 3466 usleep_range(10, 20); 3467 } 3468 return -EBUSY; 3469 } 3470 3471 /** 3472 * iavf_process_config - Process the config information we got from the PF 3473 * @adapter: board private structure 3474 * 3475 * Verify that we have a valid config struct, and set up our netdev features 3476 * and our VSI struct. 
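 *
 * Returns 0 on success, or -ENODEV if no LAN VSI was found or if the PF
 * returned fewer queues than we requested (in which case a reset is
 * scheduled to renegotiate the queue count).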
3477 **/ 3478 int iavf_process_config(struct iavf_adapter *adapter) 3479 { 3480 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3481 int i, num_req_queues = adapter->num_req_queues; 3482 struct net_device *netdev = adapter->netdev; 3483 struct iavf_vsi *vsi = &adapter->vsi; 3484 netdev_features_t hw_enc_features; 3485 netdev_features_t hw_features; 3486 3487 /* got VF config message back from PF, now we can parse it */ 3488 for (i = 0; i < vfres->num_vsis; i++) { 3489 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3490 adapter->vsi_res = &vfres->vsi_res[i]; 3491 } 3492 if (!adapter->vsi_res) { 3493 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3494 return -ENODEV; 3495 } 3496 3497 if (num_req_queues && 3498 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3499 /* Problem. The PF gave us fewer queues than what we had 3500 * negotiated in our request. Need a reset to see if we can't 3501 * get back to a working state. 3502 */ 3503 dev_err(&adapter->pdev->dev, 3504 "Requested %d queues, but PF only gave us %d.\n", 3505 num_req_queues, 3506 adapter->vsi_res->num_queue_pairs); 3507 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3508 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3509 iavf_schedule_reset(adapter); 3510 return -ENODEV; 3511 } 3512 adapter->num_req_queues = 0; 3513 3514 hw_enc_features = NETIF_F_SG | 3515 NETIF_F_IP_CSUM | 3516 NETIF_F_IPV6_CSUM | 3517 NETIF_F_HIGHDMA | 3518 NETIF_F_SOFT_FEATURES | 3519 NETIF_F_TSO | 3520 NETIF_F_TSO_ECN | 3521 NETIF_F_TSO6 | 3522 NETIF_F_SCTP_CRC | 3523 NETIF_F_RXHASH | 3524 NETIF_F_RXCSUM | 3525 0; 3526 3527 /* advertise to stack only if offloads for encapsulated packets is 3528 * supported 3529 */ 3530 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3531 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3532 NETIF_F_GSO_GRE | 3533 NETIF_F_GSO_GRE_CSUM | 3534 NETIF_F_GSO_IPXIP4 | 3535 NETIF_F_GSO_IPXIP6 | 3536 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3537 NETIF_F_GSO_PARTIAL | 3538 0; 3539 3540 if (!(vfres->vf_cap_flags & 3541 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3542 netdev->gso_partial_features |= 3543 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3544 3545 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3546 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3547 netdev->hw_enc_features |= hw_enc_features; 3548 } 3549 /* record features VLANs can make use of */ 3550 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3551 3552 /* Write features and hw_features separately to avoid polluting 3553 * with, or dropping, features that are set when we registered. 3554 */ 3555 hw_features = hw_enc_features; 3556 3557 /* Enable VLAN features if supported */ 3558 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3559 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3560 NETIF_F_HW_VLAN_CTAG_RX); 3561 /* Enable cloud filter if ADQ is supported */ 3562 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3563 hw_features |= NETIF_F_HW_TC; 3564 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 3565 hw_features |= NETIF_F_GSO_UDP_L4; 3566 3567 netdev->hw_features |= hw_features; 3568 3569 netdev->features |= hw_features; 3570 3571 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3572 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3573 3574 netdev->priv_flags |= IFF_UNICAST_FLT; 3575 3576 /* Do not turn on offloads when they are requested to be turned off. 3577 * TSO needs minimum 576 bytes to work correctly. 
3578 */ 3579 if (netdev->wanted_features) { 3580 if (!(netdev->wanted_features & NETIF_F_TSO) || 3581 netdev->mtu < 576) 3582 netdev->features &= ~NETIF_F_TSO; 3583 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3584 netdev->mtu < 576) 3585 netdev->features &= ~NETIF_F_TSO6; 3586 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3587 netdev->features &= ~NETIF_F_TSO_ECN; 3588 if (!(netdev->wanted_features & NETIF_F_GRO)) 3589 netdev->features &= ~NETIF_F_GRO; 3590 if (!(netdev->wanted_features & NETIF_F_GSO)) 3591 netdev->features &= ~NETIF_F_GSO; 3592 } 3593 3594 adapter->vsi.id = adapter->vsi_res->vsi_id; 3595 3596 adapter->vsi.back = adapter; 3597 adapter->vsi.base_vector = 1; 3598 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3599 vsi->netdev = adapter->netdev; 3600 vsi->qs_handle = adapter->vsi_res->qset_handle; 3601 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3602 adapter->rss_key_size = vfres->rss_key_size; 3603 adapter->rss_lut_size = vfres->rss_lut_size; 3604 } else { 3605 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3606 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3607 } 3608 3609 return 0; 3610 } 3611 3612 /** 3613 * iavf_init_task - worker thread to perform delayed initialization 3614 * @work: pointer to work_struct containing our data 3615 * 3616 * This task completes the work that was begun in probe. Due to the nature 3617 * of VF-PF communications, we may need to wait tens of milliseconds to get 3618 * responses back from the PF. Rather than busy-wait in probe and bog down the 3619 * whole system, we'll do it in a task so we can sleep. 3620 * This task only runs during driver init. Once we've established 3621 * communications with the PF driver and set up our netdev, the watchdog 3622 * takes over. 3623 **/ 3624 static void iavf_init_task(struct work_struct *work) 3625 { 3626 struct iavf_adapter *adapter = container_of(work, 3627 struct iavf_adapter, 3628 init_task.work); 3629 struct iavf_hw *hw = &adapter->hw; 3630 3631 switch (adapter->state) { 3632 case __IAVF_STARTUP: 3633 if (iavf_startup(adapter) < 0) 3634 goto init_failed; 3635 break; 3636 case __IAVF_INIT_VERSION_CHECK: 3637 if (iavf_init_version_check(adapter) < 0) 3638 goto init_failed; 3639 break; 3640 case __IAVF_INIT_GET_RESOURCES: 3641 if (iavf_init_get_resources(adapter) < 0) 3642 goto init_failed; 3643 return; 3644 default: 3645 goto init_failed; 3646 } 3647 3648 queue_delayed_work(iavf_wq, &adapter->init_task, 3649 msecs_to_jiffies(30)); 3650 return; 3651 init_failed: 3652 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3653 dev_err(&adapter->pdev->dev, 3654 "Failed to communicate with PF; waiting before retry\n"); 3655 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3656 iavf_shutdown_adminq(hw); 3657 adapter->state = __IAVF_STARTUP; 3658 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3659 return; 3660 } 3661 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3662 } 3663 3664 /** 3665 * iavf_shutdown - Shutdown the device in preparation for a reboot 3666 * @pdev: pci device structure 3667 **/ 3668 static void iavf_shutdown(struct pci_dev *pdev) 3669 { 3670 struct net_device *netdev = pci_get_drvdata(pdev); 3671 struct iavf_adapter *adapter = netdev_priv(netdev); 3672 3673 netif_device_detach(netdev); 3674 3675 if (netif_running(netdev)) 3676 iavf_close(netdev); 3677 3678 /* Prevent the watchdog from running. 
*/ 3679 adapter->state = __IAVF_REMOVE; 3680 adapter->aq_required = 0; 3681 3682 #ifdef CONFIG_PM 3683 pci_save_state(pdev); 3684 3685 #endif 3686 pci_disable_device(pdev); 3687 } 3688 3689 /** 3690 * iavf_probe - Device Initialization Routine 3691 * @pdev: PCI device information struct 3692 * @ent: entry in iavf_pci_tbl 3693 * 3694 * Returns 0 on success, negative on failure 3695 * 3696 * iavf_probe initializes an adapter identified by a pci_dev structure. 3697 * The OS initialization, configuring of the adapter private structure, 3698 * and a hardware reset occur. 3699 **/ 3700 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3701 { 3702 struct net_device *netdev; 3703 struct iavf_adapter *adapter = NULL; 3704 struct iavf_hw *hw = NULL; 3705 int err; 3706 3707 err = pci_enable_device(pdev); 3708 if (err) 3709 return err; 3710 3711 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3712 if (err) { 3713 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3714 if (err) { 3715 dev_err(&pdev->dev, 3716 "DMA configuration failed: 0x%x\n", err); 3717 goto err_dma; 3718 } 3719 } 3720 3721 err = pci_request_regions(pdev, iavf_driver_name); 3722 if (err) { 3723 dev_err(&pdev->dev, 3724 "pci_request_regions failed 0x%x\n", err); 3725 goto err_pci_reg; 3726 } 3727 3728 pci_enable_pcie_error_reporting(pdev); 3729 3730 pci_set_master(pdev); 3731 3732 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3733 IAVF_MAX_REQ_QUEUES); 3734 if (!netdev) { 3735 err = -ENOMEM; 3736 goto err_alloc_etherdev; 3737 } 3738 3739 SET_NETDEV_DEV(netdev, &pdev->dev); 3740 3741 pci_set_drvdata(pdev, netdev); 3742 adapter = netdev_priv(netdev); 3743 3744 adapter->netdev = netdev; 3745 adapter->pdev = pdev; 3746 3747 hw = &adapter->hw; 3748 hw->back = adapter; 3749 3750 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3751 adapter->state = __IAVF_STARTUP; 3752 3753 /* Call save state here because it relies on the adapter struct. 
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	/* Kick off the delayed init task; the devfn-based delay staggers
	 * initialization across VFs that probe at the same time.
	 */
	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
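	/* Drain every remaining filter list (MAC, VLAN, cloud, Flow Director
	 * and advanced RSS) under its lock. At this point the driver no
	 * longer talks to the PF, so the entries are simply freed.
	 */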
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */