// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hung queue
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
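 *
 * For example, with four queue vectors and six active queue pairs the
 * round-robin below maps vector 0 -> queues 0 and 4, vector 1 -> queues 1
 * and 5, vector 2 -> queue 2, and vector 3 -> queue 3.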
345 **/ 346 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 347 { 348 int rings_remaining = adapter->num_active_queues; 349 int ridx = 0, vidx = 0; 350 int q_vectors; 351 352 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 353 354 for (; ridx < rings_remaining; ridx++) { 355 iavf_map_vector_to_rxq(adapter, vidx, ridx); 356 iavf_map_vector_to_txq(adapter, vidx, ridx); 357 358 /* In the case where we have more queues than vectors, continue 359 * round-robin on vectors until all queues are mapped. 360 */ 361 if (++vidx >= q_vectors) 362 vidx = 0; 363 } 364 365 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 366 } 367 368 /** 369 * iavf_irq_affinity_notify - Callback for affinity changes 370 * @notify: context as to what irq was changed 371 * @mask: the new affinity mask 372 * 373 * This is a callback function used by the irq_set_affinity_notifier function 374 * so that we may register to receive changes to the irq affinity masks. 375 **/ 376 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 377 const cpumask_t *mask) 378 { 379 struct iavf_q_vector *q_vector = 380 container_of(notify, struct iavf_q_vector, affinity_notify); 381 382 cpumask_copy(&q_vector->affinity_mask, mask); 383 } 384 385 /** 386 * iavf_irq_affinity_release - Callback for affinity notifier release 387 * @ref: internal core kernel usage 388 * 389 * This is a callback function used by the irq_set_affinity_notifier function 390 * to inform the current notification subscriber that they will no longer 391 * receive notifications. 392 **/ 393 static void iavf_irq_affinity_release(struct kref *ref) {} 394 395 /** 396 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 397 * @adapter: board private structure 398 * @basename: device basename 399 * 400 * Allocates MSI-X vectors for tx and rx handling, and requests 401 * interrupts from the kernel. 402 **/ 403 static int 404 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 405 { 406 unsigned int vector, q_vectors; 407 unsigned int rx_int_idx = 0, tx_int_idx = 0; 408 int irq_num, err; 409 int cpu; 410 411 iavf_irq_disable(adapter); 412 /* Decrement for Other and TCP Timer vectors */ 413 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 414 415 for (vector = 0; vector < q_vectors; vector++) { 416 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 417 418 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 419 420 if (q_vector->tx.ring && q_vector->rx.ring) { 421 snprintf(q_vector->name, sizeof(q_vector->name), 422 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 423 tx_int_idx++; 424 } else if (q_vector->rx.ring) { 425 snprintf(q_vector->name, sizeof(q_vector->name), 426 "iavf-%s-rx-%d", basename, rx_int_idx++); 427 } else if (q_vector->tx.ring) { 428 snprintf(q_vector->name, sizeof(q_vector->name), 429 "iavf-%s-tx-%d", basename, tx_int_idx++); 430 } else { 431 /* skip this unused q_vector */ 432 continue; 433 } 434 err = request_irq(irq_num, 435 iavf_msix_clean_rings, 436 0, 437 q_vector->name, 438 q_vector); 439 if (err) { 440 dev_info(&adapter->pdev->dev, 441 "Request_irq failed, error: %d\n", err); 442 goto free_queue_irqs; 443 } 444 /* register for affinity change notifications */ 445 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 446 q_vector->affinity_notify.release = 447 iavf_irq_affinity_release; 448 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 449 /* Spread the IRQ affinity hints across online CPUs. 
Note that 450 * get_cpu_mask returns a mask with a permanent lifetime so 451 * it's safe to use as a hint for irq_set_affinity_hint. 452 */ 453 cpu = cpumask_local_spread(q_vector->v_idx, -1); 454 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 455 } 456 457 return 0; 458 459 free_queue_irqs: 460 while (vector) { 461 vector--; 462 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 463 irq_set_affinity_notifier(irq_num, NULL); 464 irq_set_affinity_hint(irq_num, NULL); 465 free_irq(irq_num, &adapter->q_vectors[vector]); 466 } 467 return err; 468 } 469 470 /** 471 * iavf_request_misc_irq - Initialize MSI-X interrupts 472 * @adapter: board private structure 473 * 474 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 475 * vector is only for the admin queue, and stays active even when the netdev 476 * is closed. 477 **/ 478 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 479 { 480 struct net_device *netdev = adapter->netdev; 481 int err; 482 483 snprintf(adapter->misc_vector_name, 484 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 485 dev_name(&adapter->pdev->dev)); 486 err = request_irq(adapter->msix_entries[0].vector, 487 &iavf_msix_aq, 0, 488 adapter->misc_vector_name, netdev); 489 if (err) { 490 dev_err(&adapter->pdev->dev, 491 "request_irq for %s failed: %d\n", 492 adapter->misc_vector_name, err); 493 free_irq(adapter->msix_entries[0].vector, netdev); 494 } 495 return err; 496 } 497 498 /** 499 * iavf_free_traffic_irqs - Free MSI-X interrupts 500 * @adapter: board private structure 501 * 502 * Frees all MSI-X vectors other than 0. 503 **/ 504 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 505 { 506 int vector, irq_num, q_vectors; 507 508 if (!adapter->msix_entries) 509 return; 510 511 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 512 513 for (vector = 0; vector < q_vectors; vector++) { 514 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 515 irq_set_affinity_notifier(irq_num, NULL); 516 irq_set_affinity_hint(irq_num, NULL); 517 free_irq(irq_num, &adapter->q_vectors[vector]); 518 } 519 } 520 521 /** 522 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 523 * @adapter: board private structure 524 * 525 * Frees MSI-X vector 0. 526 **/ 527 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 528 { 529 struct net_device *netdev = adapter->netdev; 530 531 if (!adapter->msix_entries) 532 return; 533 534 free_irq(adapter->msix_entries[0].vector, netdev); 535 } 536 537 /** 538 * iavf_configure_tx - Configure Transmit Unit after Reset 539 * @adapter: board private structure 540 * 541 * Configure the Tx unit of the MAC after a reset. 542 **/ 543 static void iavf_configure_tx(struct iavf_adapter *adapter) 544 { 545 struct iavf_hw *hw = &adapter->hw; 546 int i; 547 548 for (i = 0; i < adapter->num_active_queues; i++) 549 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 550 } 551 552 /** 553 * iavf_configure_rx - Configure Receive Unit after Reset 554 * @adapter: board private structure 555 * 556 * Configure the Rx unit of the MAC after a reset. 557 **/ 558 static void iavf_configure_rx(struct iavf_adapter *adapter) 559 { 560 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 561 struct iavf_hw *hw = &adapter->hw; 562 int i; 563 564 /* Legacy Rx will always default to a 2048 buffer size. 
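	 * In the default build_skb path on systems with 4K pages the buffer
	 * is raised to 3072 bytes, or trimmed to 1536 bytes for a standard
	 * MTU, as selected below.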
*/ 565 #if (PAGE_SIZE < 8192) 566 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 567 struct net_device *netdev = adapter->netdev; 568 569 /* For jumbo frames on systems with 4K pages we have to use 570 * an order 1 page, so we might as well increase the size 571 * of our Rx buffer to make better use of the available space 572 */ 573 rx_buf_len = IAVF_RXBUFFER_3072; 574 575 /* We use a 1536 buffer size for configurations with 576 * standard Ethernet mtu. On x86 this gives us enough room 577 * for shared info and 192 bytes of padding. 578 */ 579 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 580 (netdev->mtu <= ETH_DATA_LEN)) 581 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 582 } 583 #endif 584 585 for (i = 0; i < adapter->num_active_queues; i++) { 586 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 587 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 588 589 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 590 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 591 else 592 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 593 } 594 } 595 596 /** 597 * iavf_find_vlan - Search filter list for specific vlan filter 598 * @adapter: board private structure 599 * @vlan: vlan tag 600 * 601 * Returns ptr to the filter object or NULL. Must be called while holding the 602 * mac_vlan_list_lock. 603 **/ 604 static struct 605 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 606 { 607 struct iavf_vlan_filter *f; 608 609 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 610 if (vlan == f->vlan) 611 return f; 612 } 613 return NULL; 614 } 615 616 /** 617 * iavf_add_vlan - Add a vlan filter to the list 618 * @adapter: board private structure 619 * @vlan: VLAN tag 620 * 621 * Returns ptr to the filter object or NULL when no memory available. 
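 *
 * Unlike iavf_find_vlan(), this helper takes mac_vlan_list_lock itself, so
 * it must not be called with that lock already held.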
622 **/ 623 static struct 624 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 625 { 626 struct iavf_vlan_filter *f = NULL; 627 628 spin_lock_bh(&adapter->mac_vlan_list_lock); 629 630 f = iavf_find_vlan(adapter, vlan); 631 if (!f) { 632 f = kzalloc(sizeof(*f), GFP_ATOMIC); 633 if (!f) 634 goto clearout; 635 636 f->vlan = vlan; 637 638 list_add_tail(&f->list, &adapter->vlan_filter_list); 639 f->add = true; 640 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 641 } 642 643 clearout: 644 spin_unlock_bh(&adapter->mac_vlan_list_lock); 645 return f; 646 } 647 648 /** 649 * iavf_del_vlan - Remove a vlan filter from the list 650 * @adapter: board private structure 651 * @vlan: VLAN tag 652 **/ 653 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 654 { 655 struct iavf_vlan_filter *f; 656 657 spin_lock_bh(&adapter->mac_vlan_list_lock); 658 659 f = iavf_find_vlan(adapter, vlan); 660 if (f) { 661 f->remove = true; 662 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 663 } 664 665 spin_unlock_bh(&adapter->mac_vlan_list_lock); 666 } 667 668 /** 669 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 670 * @netdev: network device struct 671 * @proto: unused protocol data 672 * @vid: VLAN tag 673 **/ 674 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 675 __always_unused __be16 proto, u16 vid) 676 { 677 struct iavf_adapter *adapter = netdev_priv(netdev); 678 679 if (!VLAN_ALLOWED(adapter)) 680 return -EIO; 681 if (iavf_add_vlan(adapter, vid) == NULL) 682 return -ENOMEM; 683 return 0; 684 } 685 686 /** 687 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 688 * @netdev: network device struct 689 * @proto: unused protocol data 690 * @vid: VLAN tag 691 **/ 692 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 693 __always_unused __be16 proto, u16 vid) 694 { 695 struct iavf_adapter *adapter = netdev_priv(netdev); 696 697 if (VLAN_ALLOWED(adapter)) { 698 iavf_del_vlan(adapter, vid); 699 return 0; 700 } 701 return -EIO; 702 } 703 704 /** 705 * iavf_find_filter - Search filter list for specific mac filter 706 * @adapter: board private structure 707 * @macaddr: the MAC address 708 * 709 * Returns ptr to the filter object or NULL. Must be called while holding the 710 * mac_vlan_list_lock. 711 **/ 712 static struct 713 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 714 const u8 *macaddr) 715 { 716 struct iavf_mac_filter *f; 717 718 if (!macaddr) 719 return NULL; 720 721 list_for_each_entry(f, &adapter->mac_filter_list, list) { 722 if (ether_addr_equal(macaddr, f->macaddr)) 723 return f; 724 } 725 return NULL; 726 } 727 728 /** 729 * iavf_add_filter - Add a mac filter to the filter list 730 * @adapter: board private structure 731 * @macaddr: the MAC address 732 * 733 * Returns ptr to the filter object or NULL when no memory available. 
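 *
 * Must be called while holding the mac_vlan_list_lock; unlike iavf_add_vlan()
 * it does not take the lock itself (see iavf_set_mac() and iavf_addr_sync()).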
734 **/ 735 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 736 const u8 *macaddr) 737 { 738 struct iavf_mac_filter *f; 739 740 if (!macaddr) 741 return NULL; 742 743 f = iavf_find_filter(adapter, macaddr); 744 if (!f) { 745 f = kzalloc(sizeof(*f), GFP_ATOMIC); 746 if (!f) 747 return f; 748 749 ether_addr_copy(f->macaddr, macaddr); 750 751 list_add_tail(&f->list, &adapter->mac_filter_list); 752 f->add = true; 753 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 754 } else { 755 f->remove = false; 756 } 757 758 return f; 759 } 760 761 /** 762 * iavf_set_mac - NDO callback to set port mac address 763 * @netdev: network interface device structure 764 * @p: pointer to an address structure 765 * 766 * Returns 0 on success, negative on failure 767 **/ 768 static int iavf_set_mac(struct net_device *netdev, void *p) 769 { 770 struct iavf_adapter *adapter = netdev_priv(netdev); 771 struct iavf_hw *hw = &adapter->hw; 772 struct iavf_mac_filter *f; 773 struct sockaddr *addr = p; 774 775 if (!is_valid_ether_addr(addr->sa_data)) 776 return -EADDRNOTAVAIL; 777 778 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 779 return 0; 780 781 spin_lock_bh(&adapter->mac_vlan_list_lock); 782 783 f = iavf_find_filter(adapter, hw->mac.addr); 784 if (f) { 785 f->remove = true; 786 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 787 } 788 789 f = iavf_add_filter(adapter, addr->sa_data); 790 791 spin_unlock_bh(&adapter->mac_vlan_list_lock); 792 793 if (f) { 794 ether_addr_copy(hw->mac.addr, addr->sa_data); 795 } 796 797 return (f == NULL) ? -ENOMEM : 0; 798 } 799 800 /** 801 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 802 * @netdev: the netdevice 803 * @addr: address to add 804 * 805 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 806 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 807 */ 808 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 809 { 810 struct iavf_adapter *adapter = netdev_priv(netdev); 811 812 if (iavf_add_filter(adapter, addr)) 813 return 0; 814 else 815 return -ENOMEM; 816 } 817 818 /** 819 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 820 * @netdev: the netdevice 821 * @addr: address to add 822 * 823 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 824 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 825 */ 826 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 827 { 828 struct iavf_adapter *adapter = netdev_priv(netdev); 829 struct iavf_mac_filter *f; 830 831 /* Under some circumstances, we might receive a request to delete 832 * our own device address from our uc list. Because we store the 833 * device address in the VSI's MAC/VLAN filter list, we need to ignore 834 * such requests and not delete our device address from this list. 
835 */ 836 if (ether_addr_equal(addr, netdev->dev_addr)) 837 return 0; 838 839 f = iavf_find_filter(adapter, addr); 840 if (f) { 841 f->remove = true; 842 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 843 } 844 return 0; 845 } 846 847 /** 848 * iavf_set_rx_mode - NDO callback to set the netdev filters 849 * @netdev: network interface device structure 850 **/ 851 static void iavf_set_rx_mode(struct net_device *netdev) 852 { 853 struct iavf_adapter *adapter = netdev_priv(netdev); 854 855 spin_lock_bh(&adapter->mac_vlan_list_lock); 856 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 857 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 858 spin_unlock_bh(&adapter->mac_vlan_list_lock); 859 860 if (netdev->flags & IFF_PROMISC && 861 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 862 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 863 else if (!(netdev->flags & IFF_PROMISC) && 864 adapter->flags & IAVF_FLAG_PROMISC_ON) 865 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 866 867 if (netdev->flags & IFF_ALLMULTI && 868 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 869 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 870 else if (!(netdev->flags & IFF_ALLMULTI) && 871 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 872 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 873 } 874 875 /** 876 * iavf_napi_enable_all - enable NAPI on all queue vectors 877 * @adapter: board private structure 878 **/ 879 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 880 { 881 int q_idx; 882 struct iavf_q_vector *q_vector; 883 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 884 885 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 886 struct napi_struct *napi; 887 888 q_vector = &adapter->q_vectors[q_idx]; 889 napi = &q_vector->napi; 890 napi_enable(napi); 891 } 892 } 893 894 /** 895 * iavf_napi_disable_all - disable NAPI on all queue vectors 896 * @adapter: board private structure 897 **/ 898 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 899 { 900 int q_idx; 901 struct iavf_q_vector *q_vector; 902 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 903 904 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 905 q_vector = &adapter->q_vectors[q_idx]; 906 napi_disable(&q_vector->napi); 907 } 908 } 909 910 /** 911 * iavf_configure - set up transmit and receive data structures 912 * @adapter: board private structure 913 **/ 914 static void iavf_configure(struct iavf_adapter *adapter) 915 { 916 struct net_device *netdev = adapter->netdev; 917 int i; 918 919 iavf_set_rx_mode(netdev); 920 921 iavf_configure_tx(adapter); 922 iavf_configure_rx(adapter); 923 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 924 925 for (i = 0; i < adapter->num_active_queues; i++) { 926 struct iavf_ring *ring = &adapter->rx_rings[i]; 927 928 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 929 } 930 } 931 932 /** 933 * iavf_up_complete - Finish the last steps of bringing up a connection 934 * @adapter: board private structure 935 * 936 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
937 **/ 938 static void iavf_up_complete(struct iavf_adapter *adapter) 939 { 940 adapter->state = __IAVF_RUNNING; 941 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 942 943 iavf_napi_enable_all(adapter); 944 945 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 946 if (CLIENT_ENABLED(adapter)) 947 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 948 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 949 } 950 951 /** 952 * iavf_down - Shutdown the connection processing 953 * @adapter: board private structure 954 * 955 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 956 **/ 957 void iavf_down(struct iavf_adapter *adapter) 958 { 959 struct net_device *netdev = adapter->netdev; 960 struct iavf_vlan_filter *vlf; 961 struct iavf_mac_filter *f; 962 struct iavf_cloud_filter *cf; 963 964 if (adapter->state <= __IAVF_DOWN_PENDING) 965 return; 966 967 netif_carrier_off(netdev); 968 netif_tx_disable(netdev); 969 adapter->link_up = false; 970 iavf_napi_disable_all(adapter); 971 iavf_irq_disable(adapter); 972 973 spin_lock_bh(&adapter->mac_vlan_list_lock); 974 975 /* clear the sync flag on all filters */ 976 __dev_uc_unsync(adapter->netdev, NULL); 977 __dev_mc_unsync(adapter->netdev, NULL); 978 979 /* remove all MAC filters */ 980 list_for_each_entry(f, &adapter->mac_filter_list, list) { 981 f->remove = true; 982 } 983 984 /* remove all VLAN filters */ 985 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 986 vlf->remove = true; 987 } 988 989 spin_unlock_bh(&adapter->mac_vlan_list_lock); 990 991 /* remove all cloud filters */ 992 spin_lock_bh(&adapter->cloud_filter_list_lock); 993 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 994 cf->del = true; 995 } 996 spin_unlock_bh(&adapter->cloud_filter_list_lock); 997 998 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 999 adapter->state != __IAVF_RESETTING) { 1000 /* cancel any current operation */ 1001 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1002 /* Schedule operations to close down the HW. Don't wait 1003 * here for this to complete. The watchdog is still running 1004 * and it will take care of this. 1005 */ 1006 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1007 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1008 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1009 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1010 } 1011 1012 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1013 } 1014 1015 /** 1016 * iavf_acquire_msix_vectors - Setup the MSIX capability 1017 * @adapter: board private structure 1018 * @vectors: number of vectors to request 1019 * 1020 * Work with the OS to set up the MSIX vectors needed. 1021 * 1022 * Returns 0 on success, negative on failure 1023 **/ 1024 static int 1025 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1026 { 1027 int err, vector_threshold; 1028 1029 /* We'll want at least 3 (vector_threshold): 1030 * 0) Other (Admin Queue and link, mostly) 1031 * 1) TxQ[0] Cleanup 1032 * 2) RxQ[0] Cleanup 1033 */ 1034 vector_threshold = MIN_MSIX_COUNT; 1035 1036 /* The more we get, the more we will assign to Tx/Rx Cleanup 1037 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1038 * Right now, we simply care about how many we'll get; we'll 1039 * set them up later while requesting irq's. 
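	 * pci_enable_msix_range() returns the number of vectors actually
	 * granted (at least vector_threshold, at most 'vectors'), or a
	 * negative errno if even the minimum could not be allocated.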
1040 */ 1041 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1042 vector_threshold, vectors); 1043 if (err < 0) { 1044 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1045 kfree(adapter->msix_entries); 1046 adapter->msix_entries = NULL; 1047 return err; 1048 } 1049 1050 /* Adjust for only the vectors we'll use, which is minimum 1051 * of max_msix_q_vectors + NONQ_VECS, or the number of 1052 * vectors we were allocated. 1053 */ 1054 adapter->num_msix_vectors = err; 1055 return 0; 1056 } 1057 1058 /** 1059 * iavf_free_queues - Free memory for all rings 1060 * @adapter: board private structure to initialize 1061 * 1062 * Free all of the memory associated with queue pairs. 1063 **/ 1064 static void iavf_free_queues(struct iavf_adapter *adapter) 1065 { 1066 if (!adapter->vsi_res) 1067 return; 1068 adapter->num_active_queues = 0; 1069 kfree(adapter->tx_rings); 1070 adapter->tx_rings = NULL; 1071 kfree(adapter->rx_rings); 1072 adapter->rx_rings = NULL; 1073 } 1074 1075 /** 1076 * iavf_alloc_queues - Allocate memory for all rings 1077 * @adapter: board private structure to initialize 1078 * 1079 * We allocate one ring per queue at run-time since we don't know the 1080 * number of queues at compile-time. The polling_netdev array is 1081 * intended for Multiqueue, but should work fine with a single queue. 1082 **/ 1083 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1084 { 1085 int i, num_active_queues; 1086 1087 /* If we're in reset reallocating queues we don't actually know yet for 1088 * certain the PF gave us the number of queues we asked for but we'll 1089 * assume it did. Once basic reset is finished we'll confirm once we 1090 * start negotiating config with PF. 1091 */ 1092 if (adapter->num_req_queues) 1093 num_active_queues = adapter->num_req_queues; 1094 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1095 adapter->num_tc) 1096 num_active_queues = adapter->ch_config.total_qps; 1097 else 1098 num_active_queues = min_t(int, 1099 adapter->vsi_res->num_queue_pairs, 1100 (int)(num_online_cpus())); 1101 1102 1103 adapter->tx_rings = kcalloc(num_active_queues, 1104 sizeof(struct iavf_ring), GFP_KERNEL); 1105 if (!adapter->tx_rings) 1106 goto err_out; 1107 adapter->rx_rings = kcalloc(num_active_queues, 1108 sizeof(struct iavf_ring), GFP_KERNEL); 1109 if (!adapter->rx_rings) 1110 goto err_out; 1111 1112 for (i = 0; i < num_active_queues; i++) { 1113 struct iavf_ring *tx_ring; 1114 struct iavf_ring *rx_ring; 1115 1116 tx_ring = &adapter->tx_rings[i]; 1117 1118 tx_ring->queue_index = i; 1119 tx_ring->netdev = adapter->netdev; 1120 tx_ring->dev = &adapter->pdev->dev; 1121 tx_ring->count = adapter->tx_desc_count; 1122 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1123 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1124 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1125 1126 rx_ring = &adapter->rx_rings[i]; 1127 rx_ring->queue_index = i; 1128 rx_ring->netdev = adapter->netdev; 1129 rx_ring->dev = &adapter->pdev->dev; 1130 rx_ring->count = adapter->rx_desc_count; 1131 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1132 } 1133 1134 adapter->num_active_queues = num_active_queues; 1135 1136 return 0; 1137 1138 err_out: 1139 iavf_free_queues(adapter); 1140 return -ENOMEM; 1141 } 1142 1143 /** 1144 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1145 * @adapter: board private structure to initialize 1146 * 1147 * Attempt to configure the interrupts using the best available 1148 * capabilities of the hardware and the kernel. 
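 *
 * The vector budget is min(num_active_queues + NONQ_VECS, max_vectors
 * advertised by the PF); any shortfall is absorbed later by the round-robin
 * queue-to-vector mapping in iavf_map_rings_to_vectors().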
1149 **/ 1150 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1151 { 1152 int vector, v_budget; 1153 int pairs = 0; 1154 int err = 0; 1155 1156 if (!adapter->vsi_res) { 1157 err = -EIO; 1158 goto out; 1159 } 1160 pairs = adapter->num_active_queues; 1161 1162 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1163 * us much good if we have more vectors than CPUs. However, we already 1164 * limit the total number of queues by the number of CPUs so we do not 1165 * need any further limiting here. 1166 */ 1167 v_budget = min_t(int, pairs + NONQ_VECS, 1168 (int)adapter->vf_res->max_vectors); 1169 1170 adapter->msix_entries = kcalloc(v_budget, 1171 sizeof(struct msix_entry), GFP_KERNEL); 1172 if (!adapter->msix_entries) { 1173 err = -ENOMEM; 1174 goto out; 1175 } 1176 1177 for (vector = 0; vector < v_budget; vector++) 1178 adapter->msix_entries[vector].entry = vector; 1179 1180 err = iavf_acquire_msix_vectors(adapter, v_budget); 1181 1182 out: 1183 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1184 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1185 return err; 1186 } 1187 1188 /** 1189 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1190 * @adapter: board private structure 1191 * 1192 * Return 0 on success, negative on failure 1193 **/ 1194 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1195 { 1196 struct iavf_aqc_get_set_rss_key_data *rss_key = 1197 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1198 struct iavf_hw *hw = &adapter->hw; 1199 int ret = 0; 1200 1201 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1202 /* bail because we already have a command pending */ 1203 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1204 adapter->current_op); 1205 return -EBUSY; 1206 } 1207 1208 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1209 if (ret) { 1210 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1211 iavf_stat_str(hw, ret), 1212 iavf_aq_str(hw, hw->aq.asq_last_status)); 1213 return ret; 1214 1215 } 1216 1217 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1218 adapter->rss_lut, adapter->rss_lut_size); 1219 if (ret) { 1220 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1221 iavf_stat_str(hw, ret), 1222 iavf_aq_str(hw, hw->aq.asq_last_status)); 1223 } 1224 1225 return ret; 1226 1227 } 1228 1229 /** 1230 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1231 * @adapter: board private structure 1232 * 1233 * Returns 0 on success, negative on failure 1234 **/ 1235 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1236 { 1237 struct iavf_hw *hw = &adapter->hw; 1238 u32 *dw; 1239 u16 i; 1240 1241 dw = (u32 *)adapter->rss_key; 1242 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1243 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1244 1245 dw = (u32 *)adapter->rss_lut; 1246 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1247 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1248 1249 iavf_flush(hw); 1250 1251 return 0; 1252 } 1253 1254 /** 1255 * iavf_config_rss - Configure RSS keys and lut 1256 * @adapter: board private structure 1257 * 1258 * Returns 0 on success, negative on failure 1259 **/ 1260 int iavf_config_rss(struct iavf_adapter *adapter) 1261 { 1262 1263 if (RSS_PF(adapter)) { 1264 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1265 IAVF_FLAG_AQ_SET_RSS_KEY; 1266 return 0; 1267 } else if (RSS_AQ(adapter)) { 1268 return iavf_config_rss_aq(adapter); 1269 } else { 1270 return 
iavf_config_rss_reg(adapter); 1271 } 1272 } 1273 1274 /** 1275 * iavf_fill_rss_lut - Fill the lut with default values 1276 * @adapter: board private structure 1277 **/ 1278 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1279 { 1280 u16 i; 1281 1282 for (i = 0; i < adapter->rss_lut_size; i++) 1283 adapter->rss_lut[i] = i % adapter->num_active_queues; 1284 } 1285 1286 /** 1287 * iavf_init_rss - Prepare for RSS 1288 * @adapter: board private structure 1289 * 1290 * Return 0 on success, negative on failure 1291 **/ 1292 static int iavf_init_rss(struct iavf_adapter *adapter) 1293 { 1294 struct iavf_hw *hw = &adapter->hw; 1295 int ret; 1296 1297 if (!RSS_PF(adapter)) { 1298 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1299 if (adapter->vf_res->vf_cap_flags & 1300 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1301 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1302 else 1303 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1304 1305 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1306 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1307 } 1308 1309 iavf_fill_rss_lut(adapter); 1310 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1311 ret = iavf_config_rss(adapter); 1312 1313 return ret; 1314 } 1315 1316 /** 1317 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1318 * @adapter: board private structure to initialize 1319 * 1320 * We allocate one q_vector per queue interrupt. If allocation fails we 1321 * return -ENOMEM. 1322 **/ 1323 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1324 { 1325 int q_idx = 0, num_q_vectors; 1326 struct iavf_q_vector *q_vector; 1327 1328 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1329 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1330 GFP_KERNEL); 1331 if (!adapter->q_vectors) 1332 return -ENOMEM; 1333 1334 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1335 q_vector = &adapter->q_vectors[q_idx]; 1336 q_vector->adapter = adapter; 1337 q_vector->vsi = &adapter->vsi; 1338 q_vector->v_idx = q_idx; 1339 q_vector->reg_idx = q_idx; 1340 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1341 netif_napi_add(adapter->netdev, &q_vector->napi, 1342 iavf_napi_poll, NAPI_POLL_WEIGHT); 1343 } 1344 1345 return 0; 1346 } 1347 1348 /** 1349 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1350 * @adapter: board private structure to initialize 1351 * 1352 * This function frees the memory allocated to the q_vectors. In addition if 1353 * NAPI is enabled it will delete any references to the NAPI struct prior 1354 * to freeing the q_vector. 
1355 **/ 1356 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1357 { 1358 int q_idx, num_q_vectors; 1359 int napi_vectors; 1360 1361 if (!adapter->q_vectors) 1362 return; 1363 1364 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1365 napi_vectors = adapter->num_active_queues; 1366 1367 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1368 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1369 1370 if (q_idx < napi_vectors) 1371 netif_napi_del(&q_vector->napi); 1372 } 1373 kfree(adapter->q_vectors); 1374 adapter->q_vectors = NULL; 1375 } 1376 1377 /** 1378 * iavf_reset_interrupt_capability - Reset MSIX setup 1379 * @adapter: board private structure 1380 * 1381 **/ 1382 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1383 { 1384 if (!adapter->msix_entries) 1385 return; 1386 1387 pci_disable_msix(adapter->pdev); 1388 kfree(adapter->msix_entries); 1389 adapter->msix_entries = NULL; 1390 } 1391 1392 /** 1393 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1394 * @adapter: board private structure to initialize 1395 * 1396 **/ 1397 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1398 { 1399 int err; 1400 1401 err = iavf_alloc_queues(adapter); 1402 if (err) { 1403 dev_err(&adapter->pdev->dev, 1404 "Unable to allocate memory for queues\n"); 1405 goto err_alloc_queues; 1406 } 1407 1408 rtnl_lock(); 1409 err = iavf_set_interrupt_capability(adapter); 1410 rtnl_unlock(); 1411 if (err) { 1412 dev_err(&adapter->pdev->dev, 1413 "Unable to setup interrupt capabilities\n"); 1414 goto err_set_interrupt; 1415 } 1416 1417 err = iavf_alloc_q_vectors(adapter); 1418 if (err) { 1419 dev_err(&adapter->pdev->dev, 1420 "Unable to allocate memory for queue vectors\n"); 1421 goto err_alloc_q_vectors; 1422 } 1423 1424 /* If we've made it so far while ADq flag being ON, then we haven't 1425 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1426 * resources have been allocated in the reset path. 1427 * Now we can truly claim that ADq is enabled. 1428 */ 1429 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1430 adapter->num_tc) 1431 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1432 adapter->num_tc); 1433 1434 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1435 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1436 adapter->num_active_queues); 1437 1438 return 0; 1439 err_alloc_q_vectors: 1440 iavf_reset_interrupt_capability(adapter); 1441 err_set_interrupt: 1442 iavf_free_queues(adapter); 1443 err_alloc_queues: 1444 return err; 1445 } 1446 1447 /** 1448 * iavf_free_rss - Free memory used by RSS structs 1449 * @adapter: board private structure 1450 **/ 1451 static void iavf_free_rss(struct iavf_adapter *adapter) 1452 { 1453 kfree(adapter->rss_key); 1454 adapter->rss_key = NULL; 1455 1456 kfree(adapter->rss_lut); 1457 adapter->rss_lut = NULL; 1458 } 1459 1460 /** 1461 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1462 * @adapter: board private structure 1463 * 1464 * Returns 0 on success, negative on failure 1465 **/ 1466 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1467 { 1468 struct net_device *netdev = adapter->netdev; 1469 int err; 1470 1471 if (netif_running(netdev)) 1472 iavf_free_traffic_irqs(adapter); 1473 iavf_free_misc_irq(adapter); 1474 iavf_reset_interrupt_capability(adapter); 1475 iavf_free_q_vectors(adapter); 1476 iavf_free_queues(adapter); 1477 1478 err = iavf_init_interrupt_scheme(adapter); 1479 if (err) 1480 goto err; 1481 1482 netif_tx_stop_all_queues(netdev); 1483 1484 err = iavf_request_misc_irq(adapter); 1485 if (err) 1486 goto err; 1487 1488 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1489 1490 iavf_map_rings_to_vectors(adapter); 1491 1492 if (RSS_AQ(adapter)) 1493 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1494 else 1495 err = iavf_init_rss(adapter); 1496 err: 1497 return err; 1498 } 1499 1500 /** 1501 * iavf_process_aq_command - process aq_required flags 1502 * and sends aq command 1503 * @adapter: pointer to iavf adapter structure 1504 * 1505 * Returns 0 on success 1506 * Returns error code if no command was sent 1507 * or error code if the command failed. 
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}

	return -EAGAIN;
}

/**
iavf_startup - first step of driver startup 1636 * @adapter: board private structure 1637 * 1638 * Function process __IAVF_STARTUP driver state. 1639 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1640 * when fails it returns -EAGAIN 1641 **/ 1642 static int iavf_startup(struct iavf_adapter *adapter) 1643 { 1644 struct pci_dev *pdev = adapter->pdev; 1645 struct iavf_hw *hw = &adapter->hw; 1646 int err; 1647 1648 WARN_ON(adapter->state != __IAVF_STARTUP); 1649 1650 /* driver loaded, probe complete */ 1651 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1652 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1653 err = iavf_set_mac_type(hw); 1654 if (err) { 1655 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1656 goto err; 1657 } 1658 1659 err = iavf_check_reset_complete(hw); 1660 if (err) { 1661 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1662 err); 1663 goto err; 1664 } 1665 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1666 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1667 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1668 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1669 1670 err = iavf_init_adminq(hw); 1671 if (err) { 1672 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1673 goto err; 1674 } 1675 err = iavf_send_api_ver(adapter); 1676 if (err) { 1677 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1678 iavf_shutdown_adminq(hw); 1679 goto err; 1680 } 1681 adapter->state = __IAVF_INIT_VERSION_CHECK; 1682 err: 1683 return err; 1684 } 1685 1686 /** 1687 * iavf_init_version_check - second step of driver startup 1688 * @adapter: board private structure 1689 * 1690 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1691 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1692 * when fails it returns -EAGAIN 1693 **/ 1694 static int iavf_init_version_check(struct iavf_adapter *adapter) 1695 { 1696 struct pci_dev *pdev = adapter->pdev; 1697 struct iavf_hw *hw = &adapter->hw; 1698 int err = -EAGAIN; 1699 1700 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1701 1702 if (!iavf_asq_done(hw)) { 1703 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1704 iavf_shutdown_adminq(hw); 1705 adapter->state = __IAVF_STARTUP; 1706 goto err; 1707 } 1708 1709 /* aq msg sent, awaiting reply */ 1710 err = iavf_verify_api_ver(adapter); 1711 if (err) { 1712 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1713 err = iavf_send_api_ver(adapter); 1714 else 1715 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1716 adapter->pf_version.major, 1717 adapter->pf_version.minor, 1718 VIRTCHNL_VERSION_MAJOR, 1719 VIRTCHNL_VERSION_MINOR); 1720 goto err; 1721 } 1722 err = iavf_send_vf_config_msg(adapter); 1723 if (err) { 1724 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1725 err); 1726 goto err; 1727 } 1728 adapter->state = __IAVF_INIT_GET_RESOURCES; 1729 1730 err: 1731 return err; 1732 } 1733 1734 /** 1735 * iavf_init_get_resources - third step of driver startup 1736 * @adapter: board private structure 1737 * 1738 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1739 * finishes driver initialization procedure. 
1740 * When success the state is changed to __IAVF_DOWN 1741 * when fails it returns -EAGAIN 1742 **/ 1743 static int iavf_init_get_resources(struct iavf_adapter *adapter) 1744 { 1745 struct net_device *netdev = adapter->netdev; 1746 struct pci_dev *pdev = adapter->pdev; 1747 struct iavf_hw *hw = &adapter->hw; 1748 int err; 1749 1750 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1751 /* aq msg sent, awaiting reply */ 1752 if (!adapter->vf_res) { 1753 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 1754 GFP_KERNEL); 1755 if (!adapter->vf_res) { 1756 err = -ENOMEM; 1757 goto err; 1758 } 1759 } 1760 err = iavf_get_vf_config(adapter); 1761 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1762 err = iavf_send_vf_config_msg(adapter); 1763 goto err; 1764 } else if (err == IAVF_ERR_PARAM) { 1765 /* We only get ERR_PARAM if the device is in a very bad 1766 * state or if we've been disabled for previous bad 1767 * behavior. Either way, we're done now. 1768 */ 1769 iavf_shutdown_adminq(hw); 1770 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1771 return 0; 1772 } 1773 if (err) { 1774 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1775 goto err_alloc; 1776 } 1777 1778 if (iavf_process_config(adapter)) 1779 goto err_alloc; 1780 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1781 1782 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1783 1784 netdev->netdev_ops = &iavf_netdev_ops; 1785 iavf_set_ethtool_ops(netdev); 1786 netdev->watchdog_timeo = 5 * HZ; 1787 1788 /* MTU range: 68 - 9710 */ 1789 netdev->min_mtu = ETH_MIN_MTU; 1790 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1791 1792 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1793 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1794 adapter->hw.mac.addr); 1795 eth_hw_addr_random(netdev); 1796 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1797 } else { 1798 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1799 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1800 } 1801 1802 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1803 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1804 err = iavf_init_interrupt_scheme(adapter); 1805 if (err) 1806 goto err_sw_init; 1807 iavf_map_rings_to_vectors(adapter); 1808 if (adapter->vf_res->vf_cap_flags & 1809 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1810 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1811 1812 err = iavf_request_misc_irq(adapter); 1813 if (err) 1814 goto err_sw_init; 1815 1816 netif_carrier_off(netdev); 1817 adapter->link_up = false; 1818 1819 /* set the semaphore to prevent any callbacks after device registration 1820 * up to time when state of driver will be set to __IAVF_DOWN 1821 */ 1822 rtnl_lock(); 1823 if (!adapter->netdev_registered) { 1824 err = register_netdevice(netdev); 1825 if (err) { 1826 rtnl_unlock(); 1827 goto err_register; 1828 } 1829 } 1830 1831 adapter->netdev_registered = true; 1832 1833 netif_tx_stop_all_queues(netdev); 1834 if (CLIENT_ALLOWED(adapter)) { 1835 err = iavf_lan_add_device(adapter); 1836 if (err) { 1837 rtnl_unlock(); 1838 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1839 err); 1840 } 1841 } 1842 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1843 if (netdev->features & NETIF_F_GRO) 1844 dev_info(&pdev->dev, "GRO is enabled\n"); 1845 1846 adapter->state = __IAVF_DOWN; 1847 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1848 rtnl_unlock(); 1849 1850 iavf_misc_irq_enable(adapter); 1851 wake_up(&adapter->down_waitqueue); 
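	/* rss_key_size and rss_lut_size are sized earlier from the
	 * negotiated VF config (see iavf_process_config()); both buffers
	 * are released again in iavf_free_rss().
	 */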
1852 1853 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1854 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1855 if (!adapter->rss_key || !adapter->rss_lut) { 1856 err = -ENOMEM; 1857 goto err_mem; 1858 } 1859 if (RSS_AQ(adapter)) 1860 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1861 else 1862 iavf_init_rss(adapter); 1863 1864 return err; 1865 err_mem: 1866 iavf_free_rss(adapter); 1867 err_register: 1868 iavf_free_misc_irq(adapter); 1869 err_sw_init: 1870 iavf_reset_interrupt_capability(adapter); 1871 err_alloc: 1872 kfree(adapter->vf_res); 1873 adapter->vf_res = NULL; 1874 err: 1875 return err; 1876 } 1877 1878 /** 1879 * iavf_watchdog_task - Periodic call-back task 1880 * @work: pointer to work_struct 1881 **/ 1882 static void iavf_watchdog_task(struct work_struct *work) 1883 { 1884 struct iavf_adapter *adapter = container_of(work, 1885 struct iavf_adapter, 1886 watchdog_task.work); 1887 struct iavf_hw *hw = &adapter->hw; 1888 u32 reg_val; 1889 1890 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1891 goto restart_watchdog; 1892 1893 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1894 adapter->state = __IAVF_COMM_FAILED; 1895 1896 switch (adapter->state) { 1897 case __IAVF_COMM_FAILED: 1898 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1899 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1900 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1901 reg_val == VIRTCHNL_VFR_COMPLETED) { 1902 /* A chance for redemption! */ 1903 dev_err(&adapter->pdev->dev, 1904 "Hardware came out of reset. Attempting reinit.\n"); 1905 adapter->state = __IAVF_STARTUP; 1906 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1907 queue_delayed_work(iavf_wq, &adapter->init_task, 10); 1908 clear_bit(__IAVF_IN_CRITICAL_TASK, 1909 &adapter->crit_section); 1910 /* Don't reschedule the watchdog, since we've restarted 1911 * the init task. When init_task contacts the PF and 1912 * gets everything set up again, it'll restart the 1913 * watchdog for us. Down, boy. Sit. Stay. Woof. 
1914 */ 1915 return; 1916 } 1917 adapter->aq_required = 0; 1918 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1919 clear_bit(__IAVF_IN_CRITICAL_TASK, 1920 &adapter->crit_section); 1921 queue_delayed_work(iavf_wq, 1922 &adapter->watchdog_task, 1923 msecs_to_jiffies(10)); 1924 goto watchdog_done; 1925 case __IAVF_RESETTING: 1926 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1927 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1928 return; 1929 case __IAVF_DOWN: 1930 case __IAVF_DOWN_PENDING: 1931 case __IAVF_TESTING: 1932 case __IAVF_RUNNING: 1933 if (adapter->current_op) { 1934 if (!iavf_asq_done(hw)) { 1935 dev_dbg(&adapter->pdev->dev, 1936 "Admin queue timeout\n"); 1937 iavf_send_api_ver(adapter); 1938 } 1939 } else { 1940 /* An error will be returned if no commands were 1941 * processed; use this opportunity to update stats 1942 */ 1943 if (iavf_process_aq_command(adapter) && 1944 adapter->state == __IAVF_RUNNING) 1945 iavf_request_stats(adapter); 1946 } 1947 break; 1948 case __IAVF_REMOVE: 1949 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1950 return; 1951 default: 1952 goto restart_watchdog; 1953 } 1954 1955 /* check for hw reset */ 1956 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 1957 if (!reg_val) { 1958 adapter->state = __IAVF_RESETTING; 1959 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1960 adapter->aq_required = 0; 1961 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1962 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1963 queue_work(iavf_wq, &adapter->reset_task); 1964 goto watchdog_done; 1965 } 1966 1967 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 1968 watchdog_done: 1969 if (adapter->state == __IAVF_RUNNING || 1970 adapter->state == __IAVF_COMM_FAILED) 1971 iavf_detect_recover_hung(&adapter->vsi); 1972 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1973 restart_watchdog: 1974 if (adapter->aq_required) 1975 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1976 msecs_to_jiffies(20)); 1977 else 1978 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1979 queue_work(iavf_wq, &adapter->adminq_task); 1980 } 1981 1982 static void iavf_disable_vf(struct iavf_adapter *adapter) 1983 { 1984 struct iavf_mac_filter *f, *ftmp; 1985 struct iavf_vlan_filter *fv, *fvtmp; 1986 struct iavf_cloud_filter *cf, *cftmp; 1987 1988 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 1989 1990 /* We don't use netif_running() because it may be true prior to 1991 * ndo_open() returning, so we can't assume it means all our open 1992 * tasks have finished, since we're not holding the rtnl_lock here. 
1993 */ 1994 if (adapter->state == __IAVF_RUNNING) { 1995 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1996 netif_carrier_off(adapter->netdev); 1997 netif_tx_disable(adapter->netdev); 1998 adapter->link_up = false; 1999 iavf_napi_disable_all(adapter); 2000 iavf_irq_disable(adapter); 2001 iavf_free_traffic_irqs(adapter); 2002 iavf_free_all_tx_resources(adapter); 2003 iavf_free_all_rx_resources(adapter); 2004 } 2005 2006 spin_lock_bh(&adapter->mac_vlan_list_lock); 2007 2008 /* Delete all of the filters */ 2009 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2010 list_del(&f->list); 2011 kfree(f); 2012 } 2013 2014 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2015 list_del(&fv->list); 2016 kfree(fv); 2017 } 2018 2019 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2020 2021 spin_lock_bh(&adapter->cloud_filter_list_lock); 2022 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2023 list_del(&cf->list); 2024 kfree(cf); 2025 adapter->num_cloud_filters--; 2026 } 2027 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2028 2029 iavf_free_misc_irq(adapter); 2030 iavf_reset_interrupt_capability(adapter); 2031 iavf_free_queues(adapter); 2032 iavf_free_q_vectors(adapter); 2033 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2034 iavf_shutdown_adminq(&adapter->hw); 2035 adapter->netdev->flags &= ~IFF_UP; 2036 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2037 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2038 adapter->state = __IAVF_DOWN; 2039 wake_up(&adapter->down_waitqueue); 2040 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2041 } 2042 2043 /** 2044 * iavf_reset_task - Call-back task to handle hardware reset 2045 * @work: pointer to work_struct 2046 * 2047 * During reset we need to shut down and reinitialize the admin queue 2048 * before we can use it to communicate with the PF again. We also clear 2049 * and reinit the rings because that context is lost as well. 2050 **/ 2051 static void iavf_reset_task(struct work_struct *work) 2052 { 2053 struct iavf_adapter *adapter = container_of(work, 2054 struct iavf_adapter, 2055 reset_task); 2056 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2057 struct net_device *netdev = adapter->netdev; 2058 struct iavf_hw *hw = &adapter->hw; 2059 struct iavf_mac_filter *f, *ftmp; 2060 struct iavf_vlan_filter *vlf; 2061 struct iavf_cloud_filter *cf; 2062 u32 reg_val; 2063 int i = 0, err; 2064 bool running; 2065 2066 /* When device is being removed it doesn't make sense to run the reset 2067 * task, just return in such a case. 2068 */ 2069 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2070 return; 2071 2072 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2073 &adapter->crit_section)) 2074 usleep_range(500, 1000); 2075 if (CLIENT_ENABLED(adapter)) { 2076 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2077 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2078 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2079 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2080 cancel_delayed_work_sync(&adapter->client_task); 2081 iavf_notify_client_close(&adapter->vsi, true); 2082 } 2083 iavf_misc_irq_disable(adapter); 2084 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2085 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2086 /* Restart the AQ here. If we have been reset but didn't 2087 * detect it, or if the PF had to reinit, our AQ will be hosed. 
2088 */ 2089 iavf_shutdown_adminq(hw); 2090 iavf_init_adminq(hw); 2091 iavf_request_reset(adapter); 2092 } 2093 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2094 2095 /* poll until we see the reset actually happen */ 2096 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2097 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2098 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2099 if (!reg_val) 2100 break; 2101 usleep_range(5000, 10000); 2102 } 2103 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2104 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2105 goto continue_reset; /* act like the reset happened */ 2106 } 2107 2108 /* wait until the reset is complete and the PF is responding to us */ 2109 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2110 /* sleep first to make sure a minimum wait time is met */ 2111 msleep(IAVF_RESET_WAIT_MS); 2112 2113 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2114 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2115 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2116 break; 2117 } 2118 2119 pci_set_master(adapter->pdev); 2120 2121 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 2122 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2123 reg_val); 2124 iavf_disable_vf(adapter); 2125 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2126 return; /* Do not attempt to reinit. It's dead, Jim. */ 2127 } 2128 2129 continue_reset: 2130 /* We don't use netif_running() because it may be true prior to 2131 * ndo_open() returning, so we can't assume it means all our open 2132 * tasks have finished, since we're not holding the rtnl_lock here. 2133 */ 2134 running = ((adapter->state == __IAVF_RUNNING) || 2135 (adapter->state == __IAVF_RESETTING)); 2136 2137 if (running) { 2138 netif_carrier_off(netdev); 2139 netif_tx_stop_all_queues(netdev); 2140 adapter->link_up = false; 2141 iavf_napi_disable_all(adapter); 2142 } 2143 iavf_irq_disable(adapter); 2144 2145 adapter->state = __IAVF_RESETTING; 2146 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2147 2148 /* free the Tx/Rx rings and descriptors, might be better to just 2149 * re-use them sometime in the future 2150 */ 2151 iavf_free_all_rx_resources(adapter); 2152 iavf_free_all_tx_resources(adapter); 2153 2154 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2155 /* kill and reinit the admin queue */ 2156 iavf_shutdown_adminq(hw); 2157 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2158 err = iavf_init_adminq(hw); 2159 if (err) 2160 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2161 err); 2162 adapter->aq_required = 0; 2163 2164 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2165 err = iavf_reinit_interrupt_scheme(adapter); 2166 if (err) 2167 goto reset_err; 2168 } 2169 2170 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2171 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2172 2173 spin_lock_bh(&adapter->mac_vlan_list_lock); 2174 2175 /* Delete filter for the current MAC address, it could have 2176 * been changed by the PF via administratively set MAC. 2177 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. 
2178 */ 2179 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2180 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { 2181 list_del(&f->list); 2182 kfree(f); 2183 } 2184 } 2185 /* re-add all MAC filters */ 2186 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2187 f->add = true; 2188 } 2189 /* re-add all VLAN filters */ 2190 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2191 vlf->add = true; 2192 } 2193 2194 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2195 2196 /* check if TCs are running and re-add all cloud filters */ 2197 spin_lock_bh(&adapter->cloud_filter_list_lock); 2198 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2199 adapter->num_tc) { 2200 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2201 cf->add = true; 2202 } 2203 } 2204 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2205 2206 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2207 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2208 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2209 iavf_misc_irq_enable(adapter); 2210 2211 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2212 2213 /* We were running when the reset started, so we need to restore some 2214 * state here. 2215 */ 2216 if (running) { 2217 /* allocate transmit descriptors */ 2218 err = iavf_setup_all_tx_resources(adapter); 2219 if (err) 2220 goto reset_err; 2221 2222 /* allocate receive descriptors */ 2223 err = iavf_setup_all_rx_resources(adapter); 2224 if (err) 2225 goto reset_err; 2226 2227 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2228 err = iavf_request_traffic_irqs(adapter, netdev->name); 2229 if (err) 2230 goto reset_err; 2231 2232 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2233 } 2234 2235 iavf_configure(adapter); 2236 2237 iavf_up_complete(adapter); 2238 2239 iavf_irq_enable(adapter, true); 2240 } else { 2241 adapter->state = __IAVF_DOWN; 2242 wake_up(&adapter->down_waitqueue); 2243 } 2244 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2245 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2246 2247 return; 2248 reset_err: 2249 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2250 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2251 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2252 iavf_close(netdev); 2253 } 2254 2255 /** 2256 * iavf_adminq_task - worker thread to clean the admin queue 2257 * @work: pointer to work_struct containing our data 2258 **/ 2259 static void iavf_adminq_task(struct work_struct *work) 2260 { 2261 struct iavf_adapter *adapter = 2262 container_of(work, struct iavf_adapter, adminq_task); 2263 struct iavf_hw *hw = &adapter->hw; 2264 struct iavf_arq_event_info event; 2265 enum virtchnl_ops v_op; 2266 enum iavf_status ret, v_ret; 2267 u32 val, oldval; 2268 u16 pending; 2269 2270 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2271 goto out; 2272 2273 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2274 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2275 if (!event.msg_buf) 2276 goto out; 2277 2278 do { 2279 ret = iavf_clean_arq_element(hw, &event, &pending); 2280 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2281 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2282 2283 if (ret || !v_op) 2284 break; /* No event to process or error cleaning ARQ */ 2285 2286 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2287 event.msg_len); 2288 if (pending != 0) 2289 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2290 } 
while (pending); 2291 2292 if ((adapter->flags & 2293 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2294 adapter->state == __IAVF_RESETTING) 2295 goto freedom; 2296 2297 /* check for error indications */ 2298 val = rd32(hw, hw->aq.arq.len); 2299 if (val == 0xdeadbeef) /* indicates device in reset */ 2300 goto freedom; 2301 oldval = val; 2302 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2303 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2304 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2305 } 2306 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2307 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2308 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2309 } 2310 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2311 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2312 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2313 } 2314 if (oldval != val) 2315 wr32(hw, hw->aq.arq.len, val); 2316 2317 val = rd32(hw, hw->aq.asq.len); 2318 oldval = val; 2319 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2320 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2321 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2322 } 2323 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2324 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2325 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2326 } 2327 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2328 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2329 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2330 } 2331 if (oldval != val) 2332 wr32(hw, hw->aq.asq.len, val); 2333 2334 freedom: 2335 kfree(event.msg_buf); 2336 out: 2337 /* re-enable Admin queue interrupt cause */ 2338 iavf_misc_irq_enable(adapter); 2339 } 2340 2341 /** 2342 * iavf_client_task - worker thread to perform client work 2343 * @work: pointer to work_struct containing our data 2344 * 2345 * This task handles client interactions. Because client calls can be 2346 * reentrant, we can't handle them in the watchdog. 2347 **/ 2348 static void iavf_client_task(struct work_struct *work) 2349 { 2350 struct iavf_adapter *adapter = 2351 container_of(work, struct iavf_adapter, client_task.work); 2352 2353 /* If we can't get the client bit, just give up. We'll be rescheduled 2354 * later. 
2355 */ 2356 2357 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) 2358 return; 2359 2360 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2361 iavf_client_subtask(adapter); 2362 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2363 goto out; 2364 } 2365 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2366 iavf_notify_client_l2_params(&adapter->vsi); 2367 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2368 goto out; 2369 } 2370 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2371 iavf_notify_client_close(&adapter->vsi, false); 2372 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2373 goto out; 2374 } 2375 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2376 iavf_notify_client_open(&adapter->vsi); 2377 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2378 } 2379 out: 2380 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2381 } 2382 2383 /** 2384 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2385 * @adapter: board private structure 2386 * 2387 * Free all transmit software resources 2388 **/ 2389 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2390 { 2391 int i; 2392 2393 if (!adapter->tx_rings) 2394 return; 2395 2396 for (i = 0; i < adapter->num_active_queues; i++) 2397 if (adapter->tx_rings[i].desc) 2398 iavf_free_tx_resources(&adapter->tx_rings[i]); 2399 } 2400 2401 /** 2402 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2403 * @adapter: board private structure 2404 * 2405 * If this function returns with an error, then it's possible one or 2406 * more of the rings is populated (while the rest are not). It is the 2407 * callers duty to clean those orphaned rings. 2408 * 2409 * Return 0 on success, negative on failure 2410 **/ 2411 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2412 { 2413 int i, err = 0; 2414 2415 for (i = 0; i < adapter->num_active_queues; i++) { 2416 adapter->tx_rings[i].count = adapter->tx_desc_count; 2417 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2418 if (!err) 2419 continue; 2420 dev_err(&adapter->pdev->dev, 2421 "Allocation for Tx Queue %u failed\n", i); 2422 break; 2423 } 2424 2425 return err; 2426 } 2427 2428 /** 2429 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2430 * @adapter: board private structure 2431 * 2432 * If this function returns with an error, then it's possible one or 2433 * more of the rings is populated (while the rest are not). It is the 2434 * callers duty to clean those orphaned rings. 
2435 * 2436 * Return 0 on success, negative on failure 2437 **/ 2438 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2439 { 2440 int i, err = 0; 2441 2442 for (i = 0; i < adapter->num_active_queues; i++) { 2443 adapter->rx_rings[i].count = adapter->rx_desc_count; 2444 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2445 if (!err) 2446 continue; 2447 dev_err(&adapter->pdev->dev, 2448 "Allocation for Rx Queue %u failed\n", i); 2449 break; 2450 } 2451 return err; 2452 } 2453 2454 /** 2455 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2456 * @adapter: board private structure 2457 * 2458 * Free all receive software resources 2459 **/ 2460 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2461 { 2462 int i; 2463 2464 if (!adapter->rx_rings) 2465 return; 2466 2467 for (i = 0; i < adapter->num_active_queues; i++) 2468 if (adapter->rx_rings[i].desc) 2469 iavf_free_rx_resources(&adapter->rx_rings[i]); 2470 } 2471 2472 /** 2473 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2474 * @adapter: board private structure 2475 * @max_tx_rate: max Tx bw for a tc 2476 **/ 2477 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2478 u64 max_tx_rate) 2479 { 2480 int speed = 0, ret = 0; 2481 2482 if (ADV_LINK_SUPPORT(adapter)) { 2483 if (adapter->link_speed_mbps < U32_MAX) { 2484 speed = adapter->link_speed_mbps; 2485 goto validate_bw; 2486 } else { 2487 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 2488 return -EINVAL; 2489 } 2490 } 2491 2492 switch (adapter->link_speed) { 2493 case VIRTCHNL_LINK_SPEED_40GB: 2494 speed = SPEED_40000; 2495 break; 2496 case VIRTCHNL_LINK_SPEED_25GB: 2497 speed = SPEED_25000; 2498 break; 2499 case VIRTCHNL_LINK_SPEED_20GB: 2500 speed = SPEED_20000; 2501 break; 2502 case VIRTCHNL_LINK_SPEED_10GB: 2503 speed = SPEED_10000; 2504 break; 2505 case VIRTCHNL_LINK_SPEED_5GB: 2506 speed = SPEED_5000; 2507 break; 2508 case VIRTCHNL_LINK_SPEED_2_5GB: 2509 speed = SPEED_2500; 2510 break; 2511 case VIRTCHNL_LINK_SPEED_1GB: 2512 speed = SPEED_1000; 2513 break; 2514 case VIRTCHNL_LINK_SPEED_100MB: 2515 speed = SPEED_100; 2516 break; 2517 default: 2518 break; 2519 } 2520 2521 validate_bw: 2522 if (max_tx_rate > speed) { 2523 dev_err(&adapter->pdev->dev, 2524 "Invalid tx rate specified\n"); 2525 ret = -EINVAL; 2526 } 2527 2528 return ret; 2529 } 2530 2531 /** 2532 * iavf_validate_channel_config - validate queue mapping info 2533 * @adapter: board private structure 2534 * @mqprio_qopt: queue parameters 2535 * 2536 * This function validates if the config provided by the user to 2537 * configure queue channels is valid or not. Returns 0 on a valid 2538 * config. 
2539 **/ 2540 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2541 struct tc_mqprio_qopt_offload *mqprio_qopt) 2542 { 2543 u64 total_max_rate = 0; 2544 int i, num_qps = 0; 2545 u64 tx_rate = 0; 2546 int ret = 0; 2547 2548 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2549 mqprio_qopt->qopt.num_tc < 1) 2550 return -EINVAL; 2551 2552 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2553 if (!mqprio_qopt->qopt.count[i] || 2554 mqprio_qopt->qopt.offset[i] != num_qps) 2555 return -EINVAL; 2556 if (mqprio_qopt->min_rate[i]) { 2557 dev_err(&adapter->pdev->dev, 2558 "Invalid min tx rate (greater than 0) specified\n"); 2559 return -EINVAL; 2560 } 2561 /* convert to Mbps */ 2562 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2563 IAVF_MBPS_DIVISOR); 2564 total_max_rate += tx_rate; 2565 num_qps += mqprio_qopt->qopt.count[i]; 2566 } 2567 if (num_qps > IAVF_MAX_REQ_QUEUES) 2568 return -EINVAL; 2569 2570 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2571 return ret; 2572 } 2573 2574 /** 2575 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 2576 * @adapter: board private structure 2577 **/ 2578 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2579 { 2580 struct iavf_cloud_filter *cf, *cftmp; 2581 2582 spin_lock_bh(&adapter->cloud_filter_list_lock); 2583 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2584 list) { 2585 list_del(&cf->list); 2586 kfree(cf); 2587 adapter->num_cloud_filters--; 2588 } 2589 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2590 } 2591 2592 /** 2593 * __iavf_setup_tc - configure multiple traffic classes 2594 * @netdev: network interface device structure 2595 * @type_data: tc offload data 2596 * 2597 * This function processes the config information provided by the 2598 * user to configure traffic classes/queue channels and packages the 2599 * information to request the PF to setup traffic classes. 2600 * 2601 * Returns 0 on success.
2602 **/ 2603 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2604 { 2605 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2606 struct iavf_adapter *adapter = netdev_priv(netdev); 2607 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2608 u8 num_tc = 0, total_qps = 0; 2609 int ret = 0, netdev_tc = 0; 2610 u64 max_tx_rate; 2611 u16 mode; 2612 int i; 2613 2614 num_tc = mqprio_qopt->qopt.num_tc; 2615 mode = mqprio_qopt->mode; 2616 2617 /* delete queue_channel */ 2618 if (!mqprio_qopt->qopt.hw) { 2619 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2620 /* reset the tc configuration */ 2621 netdev_reset_tc(netdev); 2622 adapter->num_tc = 0; 2623 netif_tx_stop_all_queues(netdev); 2624 netif_tx_disable(netdev); 2625 iavf_del_all_cloud_filters(adapter); 2626 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2627 goto exit; 2628 } else { 2629 return -EINVAL; 2630 } 2631 } 2632 2633 /* add queue channel */ 2634 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2635 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2636 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2637 return -EOPNOTSUPP; 2638 } 2639 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2640 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2641 return -EINVAL; 2642 } 2643 2644 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2645 if (ret) 2646 return ret; 2647 /* Return if same TC config is requested */ 2648 if (adapter->num_tc == num_tc) 2649 return 0; 2650 adapter->num_tc = num_tc; 2651 2652 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2653 if (i < num_tc) { 2654 adapter->ch_config.ch_info[i].count = 2655 mqprio_qopt->qopt.count[i]; 2656 adapter->ch_config.ch_info[i].offset = 2657 mqprio_qopt->qopt.offset[i]; 2658 total_qps += mqprio_qopt->qopt.count[i]; 2659 max_tx_rate = mqprio_qopt->max_rate[i]; 2660 /* convert to Mbps */ 2661 max_tx_rate = div_u64(max_tx_rate, 2662 IAVF_MBPS_DIVISOR); 2663 adapter->ch_config.ch_info[i].max_tx_rate = 2664 max_tx_rate; 2665 } else { 2666 adapter->ch_config.ch_info[i].count = 1; 2667 adapter->ch_config.ch_info[i].offset = 0; 2668 } 2669 } 2670 adapter->ch_config.total_qps = total_qps; 2671 netif_tx_stop_all_queues(netdev); 2672 netif_tx_disable(netdev); 2673 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2674 netdev_reset_tc(netdev); 2675 /* Report the tc mapping up the stack */ 2676 netdev_set_num_tc(adapter->netdev, num_tc); 2677 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2678 u16 qcount = mqprio_qopt->qopt.count[i]; 2679 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2680 2681 if (i < num_tc) 2682 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2683 qoffset); 2684 } 2685 } 2686 exit: 2687 return ret; 2688 } 2689 2690 /** 2691 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2692 * @adapter: board private structure 2693 * @cls_flower: pointer to struct flow_cls_offload 2694 * @filter: pointer to cloud filter structure 2695 */ 2696 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2697 struct flow_cls_offload *f, 2698 struct iavf_cloud_filter *filter) 2699 { 2700 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2701 struct flow_dissector *dissector = rule->match.dissector; 2702 u16 n_proto_mask = 0; 2703 u16 n_proto_key = 0; 2704 u8 field_flags = 0; 2705 u16 addr_type = 0; 2706 u16 n_proto = 0; 2707 int i = 0; 2708 struct virtchnl_filter *vf = &filter->f; 2709 2710 if (dissector->used_keys & 2711 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2712 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2713 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2714 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2715 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2716 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2717 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2718 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2719 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2720 dissector->used_keys); 2721 return -EOPNOTSUPP; 2722 } 2723 2724 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2725 struct flow_match_enc_keyid match; 2726 2727 flow_rule_match_enc_keyid(rule, &match); 2728 if (match.mask->keyid != 0) 2729 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2730 } 2731 2732 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2733 struct flow_match_basic match; 2734 2735 flow_rule_match_basic(rule, &match); 2736 n_proto_key = ntohs(match.key->n_proto); 2737 n_proto_mask = ntohs(match.mask->n_proto); 2738 2739 if (n_proto_key == ETH_P_ALL) { 2740 n_proto_key = 0; 2741 n_proto_mask = 0; 2742 } 2743 n_proto = n_proto_key & n_proto_mask; 2744 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2745 return -EINVAL; 2746 if (n_proto == ETH_P_IPV6) { 2747 /* specify flow type as TCP IPv6 */ 2748 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2749 } 2750 2751 if (match.key->ip_proto != IPPROTO_TCP) { 2752 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2753 return -EINVAL; 2754 } 2755 } 2756 2757 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2758 struct flow_match_eth_addrs match; 2759 2760 flow_rule_match_eth_addrs(rule, &match); 2761 2762 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2763 if (!is_zero_ether_addr(match.mask->dst)) { 2764 if (is_broadcast_ether_addr(match.mask->dst)) { 2765 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2766 } else { 2767 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2768 match.mask->dst); 2769 return IAVF_ERR_CONFIG; 2770 } 2771 } 2772 2773 if (!is_zero_ether_addr(match.mask->src)) { 2774 if (is_broadcast_ether_addr(match.mask->src)) { 2775 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2776 } else { 2777 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2778 match.mask->src); 2779 return IAVF_ERR_CONFIG; 2780 } 2781 } 2782 2783 if (!is_zero_ether_addr(match.key->dst)) 2784 if (is_valid_ether_addr(match.key->dst) || 2785 is_multicast_ether_addr(match.key->dst)) { 2786 /* set the mask if a valid dst_mac address */ 2787 for (i = 0; i < ETH_ALEN; i++) 2788 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2789 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2790 match.key->dst); 2791 } 2792 2793 if (!is_zero_ether_addr(match.key->src)) 2794 if (is_valid_ether_addr(match.key->src) || 2795 is_multicast_ether_addr(match.key->src)) { 2796 /* set the mask if a valid dst_mac address */ 2797 for (i = 0; i < ETH_ALEN; i++) 2798 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2799 ether_addr_copy(vf->data.tcp_spec.src_mac, 2800 match.key->src); 2801 } 2802 } 2803 2804 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2805 struct flow_match_vlan match; 2806 2807 flow_rule_match_vlan(rule, &match); 2808 if (match.mask->vlan_id) { 2809 if (match.mask->vlan_id == VLAN_VID_MASK) { 2810 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2811 } else { 2812 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2813 match.mask->vlan_id); 2814 return IAVF_ERR_CONFIG; 2815 } 2816 } 2817 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2818 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2819 } 2820 2821 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2822 struct flow_match_control match; 2823 2824 
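		/* The control key carries the address family of this rule;
		 * remember it so the IPv4/IPv6 address matches can be parsed
		 * below.
		 */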
flow_rule_match_control(rule, &match); 2825 addr_type = match.key->addr_type; 2826 } 2827 2828 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2829 struct flow_match_ipv4_addrs match; 2830 2831 flow_rule_match_ipv4_addrs(rule, &match); 2832 if (match.mask->dst) { 2833 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2834 field_flags |= IAVF_CLOUD_FIELD_IIP; 2835 } else { 2836 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2837 be32_to_cpu(match.mask->dst)); 2838 return IAVF_ERR_CONFIG; 2839 } 2840 } 2841 2842 if (match.mask->src) { 2843 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2844 field_flags |= IAVF_CLOUD_FIELD_IIP; 2845 } else { 2846 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2847 be32_to_cpu(match.mask->dst)); 2848 return IAVF_ERR_CONFIG; 2849 } 2850 } 2851 2852 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 2853 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2854 return IAVF_ERR_CONFIG; 2855 } 2856 if (match.key->dst) { 2857 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 2858 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 2859 } 2860 if (match.key->src) { 2861 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 2862 vf->data.tcp_spec.src_ip[0] = match.key->src; 2863 } 2864 } 2865 2866 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2867 struct flow_match_ipv6_addrs match; 2868 2869 flow_rule_match_ipv6_addrs(rule, &match); 2870 2871 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 2872 if (ipv6_addr_any(&match.mask->dst)) { 2873 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 2874 IPV6_ADDR_ANY); 2875 return IAVF_ERR_CONFIG; 2876 } 2877 2878 /* src and dest IPv6 address should not be LOOPBACK 2879 * (0:0:0:0:0:0:0:1) which can be represented as ::1 2880 */ 2881 if (ipv6_addr_loopback(&match.key->dst) || 2882 ipv6_addr_loopback(&match.key->src)) { 2883 dev_err(&adapter->pdev->dev, 2884 "ipv6 addr should not be loopback\n"); 2885 return IAVF_ERR_CONFIG; 2886 } 2887 if (!ipv6_addr_any(&match.mask->dst) || 2888 !ipv6_addr_any(&match.mask->src)) 2889 field_flags |= IAVF_CLOUD_FIELD_IIP; 2890 2891 for (i = 0; i < 4; i++) 2892 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 2893 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 2894 sizeof(vf->data.tcp_spec.dst_ip)); 2895 for (i = 0; i < 4; i++) 2896 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 2897 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 2898 sizeof(vf->data.tcp_spec.src_ip)); 2899 } 2900 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 2901 struct flow_match_ports match; 2902 2903 flow_rule_match_ports(rule, &match); 2904 if (match.mask->src) { 2905 if (match.mask->src == cpu_to_be16(0xffff)) { 2906 field_flags |= IAVF_CLOUD_FIELD_IIP; 2907 } else { 2908 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 2909 be16_to_cpu(match.mask->src)); 2910 return IAVF_ERR_CONFIG; 2911 } 2912 } 2913 2914 if (match.mask->dst) { 2915 if (match.mask->dst == cpu_to_be16(0xffff)) { 2916 field_flags |= IAVF_CLOUD_FIELD_IIP; 2917 } else { 2918 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 2919 be16_to_cpu(match.mask->dst)); 2920 return IAVF_ERR_CONFIG; 2921 } 2922 } 2923 if (match.key->dst) { 2924 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 2925 vf->data.tcp_spec.dst_port = match.key->dst; 2926 } 2927 2928 if (match.key->src) { 2929 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 2930 vf->data.tcp_spec.src_port = match.key->src; 2931 } 2932 } 2933 vf->field_flags = field_flags; 2934 2935 
return 0; 2936 } 2937 2938 /** 2939 * iavf_handle_tclass - Forward to a traffic class on the device 2940 * @adapter: board private structure 2941 * @tc: traffic class index on the device 2942 * @filter: pointer to cloud filter structure 2943 */ 2944 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 2945 struct iavf_cloud_filter *filter) 2946 { 2947 if (tc == 0) 2948 return 0; 2949 if (tc < adapter->num_tc) { 2950 if (!filter->f.data.tcp_spec.dst_port) { 2951 dev_err(&adapter->pdev->dev, 2952 "Specify destination port to redirect to traffic class other than TC0\n"); 2953 return -EINVAL; 2954 } 2955 } 2956 /* redirect to a traffic class on the same device */ 2957 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2958 filter->f.action_meta = tc; 2959 return 0; 2960 } 2961 2962 /** 2963 * iavf_configure_clsflower - Add tc flower filters 2964 * @adapter: board private structure 2965 * @cls_flower: Pointer to struct flow_cls_offload 2966 */ 2967 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 2968 struct flow_cls_offload *cls_flower) 2969 { 2970 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 2971 struct iavf_cloud_filter *filter = NULL; 2972 int err = -EINVAL, count = 50; 2973 2974 if (tc < 0) { 2975 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 2976 return -EINVAL; 2977 } 2978 2979 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 2980 if (!filter) 2981 return -ENOMEM; 2982 2983 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 2984 &adapter->crit_section)) { 2985 if (--count == 0) 2986 goto err; 2987 udelay(1); 2988 } 2989 2990 filter->cookie = cls_flower->cookie; 2991 2992 /* set the mask to all zeroes to begin with */ 2993 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 2994 /* start out with flow type and eth type IPv4 to begin with */ 2995 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 2996 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 2997 if (err < 0) 2998 goto err; 2999 3000 err = iavf_handle_tclass(adapter, tc, filter); 3001 if (err < 0) 3002 goto err; 3003 3004 /* add filter to the list */ 3005 spin_lock_bh(&adapter->cloud_filter_list_lock); 3006 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3007 adapter->num_cloud_filters++; 3008 filter->add = true; 3009 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3010 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3011 err: 3012 if (err) 3013 kfree(filter); 3014 3015 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3016 return err; 3017 } 3018 3019 /* iavf_find_cf - Find the cloud filter in the list 3020 * @adapter: Board private structure 3021 * @cookie: filter specific cookie 3022 * 3023 * Returns ptr to the filter object or NULL. Must be called while holding the 3024 * cloud_filter_list_lock. 
3025 */ 3026 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3027 unsigned long *cookie) 3028 { 3029 struct iavf_cloud_filter *filter = NULL; 3030 3031 if (!cookie) 3032 return NULL; 3033 3034 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3035 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3036 return filter; 3037 } 3038 return NULL; 3039 } 3040 3041 /** 3042 * iavf_delete_clsflower - Remove tc flower filters 3043 * @adapter: board private structure 3044 * @cls_flower: Pointer to struct flow_cls_offload 3045 */ 3046 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3047 struct flow_cls_offload *cls_flower) 3048 { 3049 struct iavf_cloud_filter *filter = NULL; 3050 int err = 0; 3051 3052 spin_lock_bh(&adapter->cloud_filter_list_lock); 3053 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3054 if (filter) { 3055 filter->del = true; 3056 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3057 } else { 3058 err = -EINVAL; 3059 } 3060 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3061 3062 return err; 3063 } 3064 3065 /** 3066 * iavf_setup_tc_cls_flower - flower classifier offloads 3067 * @netdev: net device to configure 3068 * @type_data: offload data 3069 */ 3070 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3071 struct flow_cls_offload *cls_flower) 3072 { 3073 switch (cls_flower->command) { 3074 case FLOW_CLS_REPLACE: 3075 return iavf_configure_clsflower(adapter, cls_flower); 3076 case FLOW_CLS_DESTROY: 3077 return iavf_delete_clsflower(adapter, cls_flower); 3078 case FLOW_CLS_STATS: 3079 return -EOPNOTSUPP; 3080 default: 3081 return -EOPNOTSUPP; 3082 } 3083 } 3084 3085 /** 3086 * iavf_setup_tc_block_cb - block callback for tc 3087 * @type: type of offload 3088 * @type_data: offload data 3089 * @cb_priv: 3090 * 3091 * This function is the block callback for traffic classes 3092 **/ 3093 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3094 void *cb_priv) 3095 { 3096 struct iavf_adapter *adapter = cb_priv; 3097 3098 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 3099 return -EOPNOTSUPP; 3100 3101 switch (type) { 3102 case TC_SETUP_CLSFLOWER: 3103 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3104 default: 3105 return -EOPNOTSUPP; 3106 } 3107 } 3108 3109 static LIST_HEAD(iavf_block_cb_list); 3110 3111 /** 3112 * iavf_setup_tc - configure multiple traffic classes 3113 * @netdev: network interface device structure 3114 * @type: type of offload 3115 * @type_date: tc offload data 3116 * 3117 * This function is the callback to ndo_setup_tc in the 3118 * netdev_ops. 3119 * 3120 * Returns 0 on success 3121 **/ 3122 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3123 void *type_data) 3124 { 3125 struct iavf_adapter *adapter = netdev_priv(netdev); 3126 3127 switch (type) { 3128 case TC_SETUP_QDISC_MQPRIO: 3129 return __iavf_setup_tc(netdev, type_data); 3130 case TC_SETUP_BLOCK: 3131 return flow_block_cb_setup_simple(type_data, 3132 &iavf_block_cb_list, 3133 iavf_setup_tc_block_cb, 3134 adapter, adapter, true); 3135 default: 3136 return -EOPNOTSUPP; 3137 } 3138 } 3139 3140 /** 3141 * iavf_open - Called when a network interface is made active 3142 * @netdev: network interface device structure 3143 * 3144 * Returns 0 on success, negative value on failure 3145 * 3146 * The open entry point is called when a network interface is made 3147 * active by the system (IFF_UP). 
At this point all resources needed 3148 * for transmit and receive operations are allocated, the interrupt 3149 * handler is registered with the OS, the watchdog is started, 3150 * and the stack is notified that the interface is ready. 3151 **/ 3152 static int iavf_open(struct net_device *netdev) 3153 { 3154 struct iavf_adapter *adapter = netdev_priv(netdev); 3155 int err; 3156 3157 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3158 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3159 return -EIO; 3160 } 3161 3162 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3163 &adapter->crit_section)) 3164 usleep_range(500, 1000); 3165 3166 if (adapter->state != __IAVF_DOWN) { 3167 err = -EBUSY; 3168 goto err_unlock; 3169 } 3170 3171 /* allocate transmit descriptors */ 3172 err = iavf_setup_all_tx_resources(adapter); 3173 if (err) 3174 goto err_setup_tx; 3175 3176 /* allocate receive descriptors */ 3177 err = iavf_setup_all_rx_resources(adapter); 3178 if (err) 3179 goto err_setup_rx; 3180 3181 /* clear any pending interrupts, may auto mask */ 3182 err = iavf_request_traffic_irqs(adapter, netdev->name); 3183 if (err) 3184 goto err_req_irq; 3185 3186 spin_lock_bh(&adapter->mac_vlan_list_lock); 3187 3188 iavf_add_filter(adapter, adapter->hw.mac.addr); 3189 3190 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3191 3192 iavf_configure(adapter); 3193 3194 iavf_up_complete(adapter); 3195 3196 iavf_irq_enable(adapter, true); 3197 3198 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3199 3200 return 0; 3201 3202 err_req_irq: 3203 iavf_down(adapter); 3204 iavf_free_traffic_irqs(adapter); 3205 err_setup_rx: 3206 iavf_free_all_rx_resources(adapter); 3207 err_setup_tx: 3208 iavf_free_all_tx_resources(adapter); 3209 err_unlock: 3210 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3211 3212 return err; 3213 } 3214 3215 /** 3216 * iavf_close - Disables a network interface 3217 * @netdev: network interface device structure 3218 * 3219 * Returns 0, this is not allowed to fail 3220 * 3221 * The close entry point is called when an interface is de-activated 3222 * by the OS. The hardware is still under the drivers control, but 3223 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3224 * are freed, along with all transmit and receive resources. 3225 **/ 3226 static int iavf_close(struct net_device *netdev) 3227 { 3228 struct iavf_adapter *adapter = netdev_priv(netdev); 3229 int status; 3230 3231 if (adapter->state <= __IAVF_DOWN_PENDING) 3232 return 0; 3233 3234 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3235 &adapter->crit_section)) 3236 usleep_range(500, 1000); 3237 3238 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3239 if (CLIENT_ENABLED(adapter)) 3240 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3241 3242 iavf_down(adapter); 3243 adapter->state = __IAVF_DOWN_PENDING; 3244 iavf_free_traffic_irqs(adapter); 3245 3246 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3247 3248 /* We explicitly don't free resources here because the hardware is 3249 * still active and can DMA into memory. Resources are cleared in 3250 * iavf_virtchnl_completion() after we get confirmation from the PF 3251 * driver that the rings have been stopped. 3252 * 3253 * Also, we wait for state to transition to __IAVF_DOWN before 3254 * returning. State change occurs in iavf_virtchnl_completion() after 3255 * VF resources are released (which occurs after PF driver processes and 3256 * responds to admin queue commands). 
3257 */ 3258 3259 status = wait_event_timeout(adapter->down_waitqueue, 3260 adapter->state == __IAVF_DOWN, 3261 msecs_to_jiffies(500)); 3262 if (!status) 3263 netdev_warn(netdev, "Device resources not yet released\n"); 3264 return 0; 3265 } 3266 3267 /** 3268 * iavf_change_mtu - Change the Maximum Transfer Unit 3269 * @netdev: network interface device structure 3270 * @new_mtu: new value for maximum frame size 3271 * 3272 * Returns 0 on success, negative on failure 3273 **/ 3274 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3275 { 3276 struct iavf_adapter *adapter = netdev_priv(netdev); 3277 3278 netdev->mtu = new_mtu; 3279 if (CLIENT_ENABLED(adapter)) { 3280 iavf_notify_client_l2_params(&adapter->vsi); 3281 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3282 } 3283 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3284 queue_work(iavf_wq, &adapter->reset_task); 3285 3286 return 0; 3287 } 3288 3289 /** 3290 * iavf_set_features - set the netdev feature flags 3291 * @netdev: ptr to the netdev being adjusted 3292 * @features: the feature set that the stack is suggesting 3293 * Note: expects to be called while under rtnl_lock() 3294 **/ 3295 static int iavf_set_features(struct net_device *netdev, 3296 netdev_features_t features) 3297 { 3298 struct iavf_adapter *adapter = netdev_priv(netdev); 3299 3300 /* Don't allow changing VLAN_RX flag when adapter is not capable 3301 * of VLAN offload 3302 */ 3303 if (!VLAN_ALLOWED(adapter)) { 3304 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3305 return -EINVAL; 3306 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3307 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3308 adapter->aq_required |= 3309 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3310 else 3311 adapter->aq_required |= 3312 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3313 } 3314 3315 return 0; 3316 } 3317 3318 /** 3319 * iavf_features_check - Validate encapsulated packet conforms to limits 3320 * @skb: skb buff 3321 * @dev: This physical port's netdev 3322 * @features: Offload features that the stack believes apply 3323 **/ 3324 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3325 struct net_device *dev, 3326 netdev_features_t features) 3327 { 3328 size_t len; 3329 3330 /* No point in doing any of this if neither checksum nor GSO are 3331 * being requested for this frame. We can rule out both by just 3332 * checking for CHECKSUM_PARTIAL 3333 */ 3334 if (skb->ip_summed != CHECKSUM_PARTIAL) 3335 return features; 3336 3337 /* We cannot support GSO if the MSS is going to be less than 3338 * 64 bytes. If it is then we need to drop support for GSO. 
3339 */ 3340 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3341 features &= ~NETIF_F_GSO_MASK; 3342 3343 /* MACLEN can support at most 63 words */ 3344 len = skb_network_header(skb) - skb->data; 3345 if (len & ~(63 * 2)) 3346 goto out_err; 3347 3348 /* IPLEN and EIPLEN can support at most 127 dwords */ 3349 len = skb_transport_header(skb) - skb_network_header(skb); 3350 if (len & ~(127 * 4)) 3351 goto out_err; 3352 3353 if (skb->encapsulation) { 3354 /* L4TUNLEN can support 127 words */ 3355 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3356 if (len & ~(127 * 2)) 3357 goto out_err; 3358 3359 /* IPLEN can support at most 127 dwords */ 3360 len = skb_inner_transport_header(skb) - 3361 skb_inner_network_header(skb); 3362 if (len & ~(127 * 4)) 3363 goto out_err; 3364 } 3365 3366 /* No need to validate L4LEN as TCP is the only protocol with a 3367 * a flexible value and we support all possible values supported 3368 * by TCP, which is at most 15 dwords 3369 */ 3370 3371 return features; 3372 out_err: 3373 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3374 } 3375 3376 /** 3377 * iavf_fix_features - fix up the netdev feature bits 3378 * @netdev: our net device 3379 * @features: desired feature bits 3380 * 3381 * Returns fixed-up features bits 3382 **/ 3383 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3384 netdev_features_t features) 3385 { 3386 struct iavf_adapter *adapter = netdev_priv(netdev); 3387 3388 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3389 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3390 NETIF_F_HW_VLAN_CTAG_RX | 3391 NETIF_F_HW_VLAN_CTAG_FILTER); 3392 3393 return features; 3394 } 3395 3396 static const struct net_device_ops iavf_netdev_ops = { 3397 .ndo_open = iavf_open, 3398 .ndo_stop = iavf_close, 3399 .ndo_start_xmit = iavf_xmit_frame, 3400 .ndo_set_rx_mode = iavf_set_rx_mode, 3401 .ndo_validate_addr = eth_validate_addr, 3402 .ndo_set_mac_address = iavf_set_mac, 3403 .ndo_change_mtu = iavf_change_mtu, 3404 .ndo_tx_timeout = iavf_tx_timeout, 3405 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3406 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3407 .ndo_features_check = iavf_features_check, 3408 .ndo_fix_features = iavf_fix_features, 3409 .ndo_set_features = iavf_set_features, 3410 .ndo_setup_tc = iavf_setup_tc, 3411 }; 3412 3413 /** 3414 * iavf_check_reset_complete - check that VF reset is complete 3415 * @hw: pointer to hw struct 3416 * 3417 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3418 **/ 3419 static int iavf_check_reset_complete(struct iavf_hw *hw) 3420 { 3421 u32 rstat; 3422 int i; 3423 3424 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 3425 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3426 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3427 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3428 (rstat == VIRTCHNL_VFR_COMPLETED)) 3429 return 0; 3430 usleep_range(10, 20); 3431 } 3432 return -EBUSY; 3433 } 3434 3435 /** 3436 * iavf_process_config - Process the config information we got from the PF 3437 * @adapter: board private structure 3438 * 3439 * Verify that we have a valid config struct, and set up our netdev features 3440 * and our VSI struct. 
3441 **/ 3442 int iavf_process_config(struct iavf_adapter *adapter) 3443 { 3444 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3445 int i, num_req_queues = adapter->num_req_queues; 3446 struct net_device *netdev = adapter->netdev; 3447 struct iavf_vsi *vsi = &adapter->vsi; 3448 netdev_features_t hw_enc_features; 3449 netdev_features_t hw_features; 3450 3451 /* got VF config message back from PF, now we can parse it */ 3452 for (i = 0; i < vfres->num_vsis; i++) { 3453 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3454 adapter->vsi_res = &vfres->vsi_res[i]; 3455 } 3456 if (!adapter->vsi_res) { 3457 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3458 return -ENODEV; 3459 } 3460 3461 if (num_req_queues && 3462 num_req_queues > adapter->vsi_res->num_queue_pairs) { 3463 /* Problem. The PF gave us fewer queues than what we had 3464 * negotiated in our request. Need a reset to see if we can't 3465 * get back to a working state. 3466 */ 3467 dev_err(&adapter->pdev->dev, 3468 "Requested %d queues, but PF only gave us %d.\n", 3469 num_req_queues, 3470 adapter->vsi_res->num_queue_pairs); 3471 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3472 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3473 iavf_schedule_reset(adapter); 3474 return -ENODEV; 3475 } 3476 adapter->num_req_queues = 0; 3477 3478 hw_enc_features = NETIF_F_SG | 3479 NETIF_F_IP_CSUM | 3480 NETIF_F_IPV6_CSUM | 3481 NETIF_F_HIGHDMA | 3482 NETIF_F_SOFT_FEATURES | 3483 NETIF_F_TSO | 3484 NETIF_F_TSO_ECN | 3485 NETIF_F_TSO6 | 3486 NETIF_F_SCTP_CRC | 3487 NETIF_F_RXHASH | 3488 NETIF_F_RXCSUM | 3489 0; 3490 3491 /* advertise to stack only if offloads for encapsulated packets is 3492 * supported 3493 */ 3494 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3495 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3496 NETIF_F_GSO_GRE | 3497 NETIF_F_GSO_GRE_CSUM | 3498 NETIF_F_GSO_IPXIP4 | 3499 NETIF_F_GSO_IPXIP6 | 3500 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3501 NETIF_F_GSO_PARTIAL | 3502 0; 3503 3504 if (!(vfres->vf_cap_flags & 3505 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3506 netdev->gso_partial_features |= 3507 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3508 3509 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3510 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3511 netdev->hw_enc_features |= hw_enc_features; 3512 } 3513 /* record features VLANs can make use of */ 3514 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3515 3516 /* Write features and hw_features separately to avoid polluting 3517 * with, or dropping, features that are set when we registered. 3518 */ 3519 hw_features = hw_enc_features; 3520 3521 /* Enable VLAN features if supported */ 3522 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3523 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3524 NETIF_F_HW_VLAN_CTAG_RX); 3525 /* Enable cloud filter if ADQ is supported */ 3526 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3527 hw_features |= NETIF_F_HW_TC; 3528 3529 netdev->hw_features |= hw_features; 3530 3531 netdev->features |= hw_features; 3532 3533 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3534 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3535 3536 netdev->priv_flags |= IFF_UNICAST_FLT; 3537 3538 /* Do not turn on offloads when they are requested to be turned off. 3539 * TSO needs minimum 576 bytes to work correctly. 
3540 */ 3541 if (netdev->wanted_features) { 3542 if (!(netdev->wanted_features & NETIF_F_TSO) || 3543 netdev->mtu < 576) 3544 netdev->features &= ~NETIF_F_TSO; 3545 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3546 netdev->mtu < 576) 3547 netdev->features &= ~NETIF_F_TSO6; 3548 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3549 netdev->features &= ~NETIF_F_TSO_ECN; 3550 if (!(netdev->wanted_features & NETIF_F_GRO)) 3551 netdev->features &= ~NETIF_F_GRO; 3552 if (!(netdev->wanted_features & NETIF_F_GSO)) 3553 netdev->features &= ~NETIF_F_GSO; 3554 } 3555 3556 adapter->vsi.id = adapter->vsi_res->vsi_id; 3557 3558 adapter->vsi.back = adapter; 3559 adapter->vsi.base_vector = 1; 3560 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3561 vsi->netdev = adapter->netdev; 3562 vsi->qs_handle = adapter->vsi_res->qset_handle; 3563 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3564 adapter->rss_key_size = vfres->rss_key_size; 3565 adapter->rss_lut_size = vfres->rss_lut_size; 3566 } else { 3567 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3568 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3569 } 3570 3571 return 0; 3572 } 3573 3574 /** 3575 * iavf_init_task - worker thread to perform delayed initialization 3576 * @work: pointer to work_struct containing our data 3577 * 3578 * This task completes the work that was begun in probe. Due to the nature 3579 * of VF-PF communications, we may need to wait tens of milliseconds to get 3580 * responses back from the PF. Rather than busy-wait in probe and bog down the 3581 * whole system, we'll do it in a task so we can sleep. 3582 * This task only runs during driver init. Once we've established 3583 * communications with the PF driver and set up our netdev, the watchdog 3584 * takes over. 3585 **/ 3586 static void iavf_init_task(struct work_struct *work) 3587 { 3588 struct iavf_adapter *adapter = container_of(work, 3589 struct iavf_adapter, 3590 init_task.work); 3591 struct iavf_hw *hw = &adapter->hw; 3592 3593 switch (adapter->state) { 3594 case __IAVF_STARTUP: 3595 if (iavf_startup(adapter) < 0) 3596 goto init_failed; 3597 break; 3598 case __IAVF_INIT_VERSION_CHECK: 3599 if (iavf_init_version_check(adapter) < 0) 3600 goto init_failed; 3601 break; 3602 case __IAVF_INIT_GET_RESOURCES: 3603 if (iavf_init_get_resources(adapter) < 0) 3604 goto init_failed; 3605 return; 3606 default: 3607 goto init_failed; 3608 } 3609 3610 queue_delayed_work(iavf_wq, &adapter->init_task, 3611 msecs_to_jiffies(30)); 3612 return; 3613 init_failed: 3614 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3615 dev_err(&adapter->pdev->dev, 3616 "Failed to communicate with PF; waiting before retry\n"); 3617 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3618 iavf_shutdown_adminq(hw); 3619 adapter->state = __IAVF_STARTUP; 3620 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3621 return; 3622 } 3623 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3624 } 3625 3626 /** 3627 * iavf_shutdown - Shutdown the device in preparation for a reboot 3628 * @pdev: pci device structure 3629 **/ 3630 static void iavf_shutdown(struct pci_dev *pdev) 3631 { 3632 struct net_device *netdev = pci_get_drvdata(pdev); 3633 struct iavf_adapter *adapter = netdev_priv(netdev); 3634 3635 netif_device_detach(netdev); 3636 3637 if (netif_running(netdev)) 3638 iavf_close(netdev); 3639 3640 /* Prevent the watchdog from running. 
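	 * We do this by moving straight to the __IAVF_REMOVE state, which the
	 * watchdog task treats as a signal to return without rescheduling
	 * itself.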
*/ 3641 adapter->state = __IAVF_REMOVE; 3642 adapter->aq_required = 0; 3643 3644 #ifdef CONFIG_PM 3645 pci_save_state(pdev); 3646 3647 #endif 3648 pci_disable_device(pdev); 3649 } 3650 3651 /** 3652 * iavf_probe - Device Initialization Routine 3653 * @pdev: PCI device information struct 3654 * @ent: entry in iavf_pci_tbl 3655 * 3656 * Returns 0 on success, negative on failure 3657 * 3658 * iavf_probe initializes an adapter identified by a pci_dev structure. 3659 * The OS initialization, configuring of the adapter private structure, 3660 * and a hardware reset occur. 3661 **/ 3662 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3663 { 3664 struct net_device *netdev; 3665 struct iavf_adapter *adapter = NULL; 3666 struct iavf_hw *hw = NULL; 3667 int err; 3668 3669 err = pci_enable_device(pdev); 3670 if (err) 3671 return err; 3672 3673 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3674 if (err) { 3675 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3676 if (err) { 3677 dev_err(&pdev->dev, 3678 "DMA configuration failed: 0x%x\n", err); 3679 goto err_dma; 3680 } 3681 } 3682 3683 err = pci_request_regions(pdev, iavf_driver_name); 3684 if (err) { 3685 dev_err(&pdev->dev, 3686 "pci_request_regions failed 0x%x\n", err); 3687 goto err_pci_reg; 3688 } 3689 3690 pci_enable_pcie_error_reporting(pdev); 3691 3692 pci_set_master(pdev); 3693 3694 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3695 IAVF_MAX_REQ_QUEUES); 3696 if (!netdev) { 3697 err = -ENOMEM; 3698 goto err_alloc_etherdev; 3699 } 3700 3701 SET_NETDEV_DEV(netdev, &pdev->dev); 3702 3703 pci_set_drvdata(pdev, netdev); 3704 adapter = netdev_priv(netdev); 3705 3706 adapter->netdev = netdev; 3707 adapter->pdev = pdev; 3708 3709 hw = &adapter->hw; 3710 hw->back = adapter; 3711 3712 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3713 adapter->state = __IAVF_STARTUP; 3714 3715 /* Call save state here because it relies on the adapter struct. 
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
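 *
 * Return: 0 on success, negative error code on failure.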
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
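	/* mac_vlan_list_lock is held across both loops below, so the MAC
	 * and VLAN filter lists can be walked and freed safely.
	 */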
3910 */ 3911 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 3912 list_del(&f->list); 3913 kfree(f); 3914 } 3915 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, 3916 list) { 3917 list_del(&vlf->list); 3918 kfree(vlf); 3919 } 3920 3921 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3922 3923 spin_lock_bh(&adapter->cloud_filter_list_lock); 3924 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 3925 list_del(&cf->list); 3926 kfree(cf); 3927 } 3928 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3929 3930 free_netdev(netdev); 3931 3932 pci_disable_pcie_error_reporting(pdev); 3933 3934 pci_disable_device(pdev); 3935 } 3936 3937 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); 3938 3939 static struct pci_driver iavf_driver = { 3940 .name = iavf_driver_name, 3941 .id_table = iavf_pci_tbl, 3942 .probe = iavf_probe, 3943 .remove = iavf_remove, 3944 .driver.pm = &iavf_pm_ops, 3945 .shutdown = iavf_shutdown, 3946 }; 3947 3948 /** 3949 * iavf_init_module - Driver Registration Routine 3950 * 3951 * iavf_init_module is the first routine called when the driver is 3952 * loaded. All it does is register with the PCI subsystem. 3953 **/ 3954 static int __init iavf_init_module(void) 3955 { 3956 int ret; 3957 3958 pr_info("iavf: %s\n", iavf_driver_string); 3959 3960 pr_info("%s\n", iavf_copyright); 3961 3962 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, 3963 iavf_driver_name); 3964 if (!iavf_wq) { 3965 pr_err("%s: Failed to create workqueue\n", iavf_driver_name); 3966 return -ENOMEM; 3967 } 3968 ret = pci_register_driver(&iavf_driver); 3969 return ret; 3970 } 3971 3972 module_init(iavf_init_module); 3973 3974 /** 3975 * iavf_exit_module - Driver Exit Cleanup Routine 3976 * 3977 * iavf_exit_module is called just before the driver is removed 3978 * from memory. 3979 **/ 3980 static void __exit iavf_exit_module(void) 3981 { 3982 pci_unregister_driver(&iavf_driver); 3983 destroy_workqueue(iavf_wq); 3984 } 3985 3986 module_exit(iavf_exit_module); 3987 3988 /* iavf_main.c */ 3989