1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static int iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 #define DRV_KERN "-k" 25 26 #define DRV_VERSION_MAJOR 3 27 #define DRV_VERSION_MINOR 2 28 #define DRV_VERSION_BUILD 3 29 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 30 __stringify(DRV_VERSION_MINOR) "." \ 31 __stringify(DRV_VERSION_BUILD) \ 32 DRV_KERN 33 const char iavf_driver_version[] = DRV_VERSION; 34 static const char iavf_copyright[] = 35 "Copyright (c) 2013 - 2018 Intel Corporation."; 36 37 /* iavf_pci_tbl - PCI Device ID Table 38 * 39 * Wildcard entries (PCI_ANY_ID) should come last 40 * Last entry must be all 0s 41 * 42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 43 * Class, Class Mask, private data (not used) } 44 */ 45 static const struct pci_device_id iavf_pci_tbl[] = { 46 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 47 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 48 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 49 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 50 /* required last entry */ 51 {0, } 52 }; 53 54 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 55 56 MODULE_ALIAS("i40evf"); 57 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 58 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 59 MODULE_LICENSE("GPL v2"); 60 MODULE_VERSION(DRV_VERSION); 61 62 static const struct net_device_ops iavf_netdev_ops; 63 struct workqueue_struct *iavf_wq; 64 65 /** 66 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 67 * @hw: pointer to the HW structure 68 * @mem: ptr to mem struct to fill out 69 * @size: size of memory requested 70 * @alignment: what to align the allocation to 71 **/ 72 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 73 struct iavf_dma_mem *mem, 74 u64 size, u32 alignment) 75 { 76 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 77 78 if (!mem) 79 return IAVF_ERR_PARAM; 80 81 mem->size = ALIGN(size, alignment); 82 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 83 (dma_addr_t *)&mem->pa, GFP_KERNEL); 84 if (mem->va) 85 return 0; 86 else 87 return IAVF_ERR_NO_MEMORY; 88 } 89 90 /** 91 * iavf_free_dma_mem_d - OS specific memory free for shared code 92 * @hw: pointer to the HW structure 93 * @mem: ptr to mem struct to free 94 **/ 95 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 96 struct iavf_dma_mem *mem) 97 { 98 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 99 100 if (!mem || !mem->va) 101 return IAVF_ERR_PARAM; 102 dma_free_coherent(&adapter->pdev->dev, mem->size, 103 mem->va, (dma_addr_t)mem->pa); 104 return 0; 105 } 106 107 /** 108 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 109 * @hw: pointer to the HW structure 110 * @mem: ptr to mem struct to fill out 111 * @size: size of memory requested 112 **/ 113 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, 114 struct iavf_virt_mem *mem, u32 size) 115 { 116 if (!mem) 117 return IAVF_ERR_PARAM; 118 119 mem->size = size; 120 mem->va = kzalloc(size, GFP_KERNEL); 121 122 if (mem->va) 123 return 0; 124 else 125 return IAVF_ERR_NO_MEMORY; 126 } 127 128 /** 129 * iavf_free_virt_mem_d - OS specific memory free for shared code 130 * @hw: pointer to the HW structure 131 * @mem: ptr to mem struct to free 132 **/ 133 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 134 struct iavf_virt_mem *mem) 135 { 136 if (!mem) 137 return IAVF_ERR_PARAM; 138 139 /* it's ok to kfree a NULL pointer */ 140 kfree(mem->va); 141 142 return 0; 143 } 144 145 /** 146 * iavf_schedule_reset - Set the flags and schedule a reset event 147 * @adapter: board private structure 148 **/ 149 void iavf_schedule_reset(struct iavf_adapter *adapter) 150 { 151 if (!(adapter->flags & 152 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 153 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 154 queue_work(iavf_wq, &adapter->reset_task); 155 } 156 } 157 158 /** 159 * iavf_tx_timeout - Respond to a Tx Hang 160 * @netdev: network interface device structure 161 **/ 162 static void iavf_tx_timeout(struct net_device *netdev) 163 { 164 struct iavf_adapter *adapter = netdev_priv(netdev); 165 166 adapter->tx_timeout_count++; 167 iavf_schedule_reset(adapter); 168 } 169 170 /** 171 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 172 * @adapter: board private structure 173 **/ 174 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 175 { 176 struct iavf_hw *hw = &adapter->hw; 177 178 if (!adapter->msix_entries) 179 return; 180 181 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 182 183 iavf_flush(hw); 184 185 synchronize_irq(adapter->msix_entries[0].vector); 186 } 187 188 /** 189 * iavf_misc_irq_enable - Enable default interrupt generation settings 190 * @adapter: board private structure 191 **/ 192 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 193 { 194 struct iavf_hw *hw = &adapter->hw; 195 196 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 197 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 198 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 199 200 iavf_flush(hw); 201 } 202 203 /** 204 * iavf_irq_disable - Mask off interrupt generation on the NIC 205 * @adapter: board private structure 206 **/ 207 static void iavf_irq_disable(struct iavf_adapter *adapter) 208 { 209 int i; 210 struct iavf_hw *hw = &adapter->hw; 211 212 if (!adapter->msix_entries) 213 return; 214 215 for (i = 1; i < adapter->num_msix_vectors; i++) { 216 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 217 synchronize_irq(adapter->msix_entries[i].vector); 218 } 219 iavf_flush(hw); 220 } 221 222 /** 223 * iavf_irq_enable_queues - Enable interrupt for specified queues 224 * @adapter: board private structure 225 * @mask: bitmap of queues to enable 226 **/ 227 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 228 { 229 struct iavf_hw *hw = &adapter->hw; 230 int i; 231 232 for (i = 1; i < adapter->num_msix_vectors; i++) { 233 if (mask & BIT(i - 1)) { 234 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 235 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 236 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 237 } 238 } 239 } 240 241 /** 242 * iavf_irq_enable - Enable default interrupt generation settings 243 * @adapter: board private structure 244 * @flush: boolean value whether to run rd32() 245 **/ 246 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 247 { 248 struct iavf_hw *hw = &adapter->hw; 249 250 iavf_misc_irq_enable(adapter); 251 iavf_irq_enable_queues(adapter, ~0); 252 253 if (flush) 254 iavf_flush(hw); 255 } 256 257 /** 258 * iavf_msix_aq - Interrupt handler for vector 0 259 * @irq: interrupt number 260 * @data: pointer to netdev 261 **/ 262 static irqreturn_t iavf_msix_aq(int irq, void *data) 263 { 264 struct net_device *netdev = data; 265 struct iavf_adapter *adapter = netdev_priv(netdev); 266 struct iavf_hw *hw = &adapter->hw; 267 268 /* handle non-queue interrupts, these reads clear the registers */ 269 rd32(hw, IAVF_VFINT_ICR01); 270 rd32(hw, IAVF_VFINT_ICR0_ENA1); 271 272 /* schedule work on the private workqueue */ 273 queue_work(iavf_wq, &adapter->adminq_task); 274 275 return IRQ_HANDLED; 276 } 277 278 /** 279 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 280 * @irq: interrupt number 281 * @data: pointer to a q_vector 282 **/ 283 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 284 { 285 struct iavf_q_vector *q_vector = data; 286 287 if (!q_vector->tx.ring && !q_vector->rx.ring) 288 return IRQ_HANDLED; 289 290 napi_schedule_irqoff(&q_vector->napi); 291 292 return IRQ_HANDLED; 293 } 294 295 /** 296 * iavf_map_vector_to_rxq - associate irqs with rx queues 297 * @adapter: board private structure 298 * @v_idx: interrupt number 299 * @r_idx: queue number 300 **/ 301 static void 302 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 303 { 304 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 305 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 306 struct iavf_hw *hw = &adapter->hw; 307 308 rx_ring->q_vector = q_vector; 309 rx_ring->next = q_vector->rx.ring; 310 rx_ring->vsi = &adapter->vsi; 311 q_vector->rx.ring = rx_ring; 312 q_vector->rx.count++; 313 q_vector->rx.next_update = jiffies + 1; 314 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 315 q_vector->ring_mask |= BIT(r_idx); 316 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 317 q_vector->rx.current_itr); 318 q_vector->rx.current_itr = q_vector->rx.target_itr; 319 } 320 321 /** 322 * iavf_map_vector_to_txq - associate irqs with tx queues 323 * @adapter: board private structure 324 * @v_idx: interrupt number 325 * @t_idx: queue number 326 **/ 327 static void 328 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 329 { 330 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 331 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 332 struct iavf_hw *hw = &adapter->hw; 333 334 tx_ring->q_vector = q_vector; 335 tx_ring->next = q_vector->tx.ring; 336 tx_ring->vsi = &adapter->vsi; 337 q_vector->tx.ring = tx_ring; 338 q_vector->tx.count++; 339 q_vector->tx.next_update = jiffies + 1; 340 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 341 q_vector->num_ringpairs++; 342 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 343 q_vector->tx.target_itr); 344 q_vector->tx.current_itr = q_vector->tx.target_itr; 345 } 346 347 /** 348 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 349 * @adapter: board private structure to initialize 350 * 351 * This function maps descriptor rings to the queue-specific vectors 352 * we were allotted through the MSI-X enabling code. Ideally, we'd have 353 * one vector per ring/queue, but on a constrained vector budget, we 354 * group the rings as "efficiently" as possible. You would add new 355 * mapping configurations in here. 356 **/ 357 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 358 { 359 int rings_remaining = adapter->num_active_queues; 360 int ridx = 0, vidx = 0; 361 int q_vectors; 362 363 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 364 365 for (; ridx < rings_remaining; ridx++) { 366 iavf_map_vector_to_rxq(adapter, vidx, ridx); 367 iavf_map_vector_to_txq(adapter, vidx, ridx); 368 369 /* In the case where we have more queues than vectors, continue 370 * round-robin on vectors until all queues are mapped. 371 */ 372 if (++vidx >= q_vectors) 373 vidx = 0; 374 } 375 376 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 377 } 378 379 /** 380 * iavf_irq_affinity_notify - Callback for affinity changes 381 * @notify: context as to what irq was changed 382 * @mask: the new affinity mask 383 * 384 * This is a callback function used by the irq_set_affinity_notifier function 385 * so that we may register to receive changes to the irq affinity masks. 386 **/ 387 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 388 const cpumask_t *mask) 389 { 390 struct iavf_q_vector *q_vector = 391 container_of(notify, struct iavf_q_vector, affinity_notify); 392 393 cpumask_copy(&q_vector->affinity_mask, mask); 394 } 395 396 /** 397 * iavf_irq_affinity_release - Callback for affinity notifier release 398 * @ref: internal core kernel usage 399 * 400 * This is a callback function used by the irq_set_affinity_notifier function 401 * to inform the current notification subscriber that they will no longer 402 * receive notifications. 403 **/ 404 static void iavf_irq_affinity_release(struct kref *ref) {} 405 406 /** 407 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 408 * @adapter: board private structure 409 * @basename: device basename 410 * 411 * Allocates MSI-X vectors for tx and rx handling, and requests 412 * interrupts from the kernel. 413 **/ 414 static int 415 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 416 { 417 unsigned int vector, q_vectors; 418 unsigned int rx_int_idx = 0, tx_int_idx = 0; 419 int irq_num, err; 420 int cpu; 421 422 iavf_irq_disable(adapter); 423 /* Decrement for Other and TCP Timer vectors */ 424 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 425 426 for (vector = 0; vector < q_vectors; vector++) { 427 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 428 429 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 430 431 if (q_vector->tx.ring && q_vector->rx.ring) { 432 snprintf(q_vector->name, sizeof(q_vector->name), 433 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 434 tx_int_idx++; 435 } else if (q_vector->rx.ring) { 436 snprintf(q_vector->name, sizeof(q_vector->name), 437 "iavf-%s-rx-%d", basename, rx_int_idx++); 438 } else if (q_vector->tx.ring) { 439 snprintf(q_vector->name, sizeof(q_vector->name), 440 "iavf-%s-tx-%d", basename, tx_int_idx++); 441 } else { 442 /* skip this unused q_vector */ 443 continue; 444 } 445 err = request_irq(irq_num, 446 iavf_msix_clean_rings, 447 0, 448 q_vector->name, 449 q_vector); 450 if (err) { 451 dev_info(&adapter->pdev->dev, 452 "Request_irq failed, error: %d\n", err); 453 goto free_queue_irqs; 454 } 455 /* register for affinity change notifications */ 456 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 457 q_vector->affinity_notify.release = 458 iavf_irq_affinity_release; 459 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 460 /* Spread the IRQ affinity hints across online CPUs. Note that 461 * get_cpu_mask returns a mask with a permanent lifetime so 462 * it's safe to use as a hint for irq_set_affinity_hint. 463 */ 464 cpu = cpumask_local_spread(q_vector->v_idx, -1); 465 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 466 } 467 468 return 0; 469 470 free_queue_irqs: 471 while (vector) { 472 vector--; 473 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 474 irq_set_affinity_notifier(irq_num, NULL); 475 irq_set_affinity_hint(irq_num, NULL); 476 free_irq(irq_num, &adapter->q_vectors[vector]); 477 } 478 return err; 479 } 480 481 /** 482 * iavf_request_misc_irq - Initialize MSI-X interrupts 483 * @adapter: board private structure 484 * 485 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 486 * vector is only for the admin queue, and stays active even when the netdev 487 * is closed. 488 **/ 489 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 490 { 491 struct net_device *netdev = adapter->netdev; 492 int err; 493 494 snprintf(adapter->misc_vector_name, 495 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 496 dev_name(&adapter->pdev->dev)); 497 err = request_irq(adapter->msix_entries[0].vector, 498 &iavf_msix_aq, 0, 499 adapter->misc_vector_name, netdev); 500 if (err) { 501 dev_err(&adapter->pdev->dev, 502 "request_irq for %s failed: %d\n", 503 adapter->misc_vector_name, err); 504 free_irq(adapter->msix_entries[0].vector, netdev); 505 } 506 return err; 507 } 508 509 /** 510 * iavf_free_traffic_irqs - Free MSI-X interrupts 511 * @adapter: board private structure 512 * 513 * Frees all MSI-X vectors other than 0. 514 **/ 515 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 516 { 517 int vector, irq_num, q_vectors; 518 519 if (!adapter->msix_entries) 520 return; 521 522 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 523 524 for (vector = 0; vector < q_vectors; vector++) { 525 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 526 irq_set_affinity_notifier(irq_num, NULL); 527 irq_set_affinity_hint(irq_num, NULL); 528 free_irq(irq_num, &adapter->q_vectors[vector]); 529 } 530 } 531 532 /** 533 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 534 * @adapter: board private structure 535 * 536 * Frees MSI-X vector 0. 537 **/ 538 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 539 { 540 struct net_device *netdev = adapter->netdev; 541 542 if (!adapter->msix_entries) 543 return; 544 545 free_irq(adapter->msix_entries[0].vector, netdev); 546 } 547 548 /** 549 * iavf_configure_tx - Configure Transmit Unit after Reset 550 * @adapter: board private structure 551 * 552 * Configure the Tx unit of the MAC after a reset. 553 **/ 554 static void iavf_configure_tx(struct iavf_adapter *adapter) 555 { 556 struct iavf_hw *hw = &adapter->hw; 557 int i; 558 559 for (i = 0; i < adapter->num_active_queues; i++) 560 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 561 } 562 563 /** 564 * iavf_configure_rx - Configure Receive Unit after Reset 565 * @adapter: board private structure 566 * 567 * Configure the Rx unit of the MAC after a reset. 568 **/ 569 static void iavf_configure_rx(struct iavf_adapter *adapter) 570 { 571 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 572 struct iavf_hw *hw = &adapter->hw; 573 int i; 574 575 /* Legacy Rx will always default to a 2048 buffer size. */ 576 #if (PAGE_SIZE < 8192) 577 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 578 struct net_device *netdev = adapter->netdev; 579 580 /* For jumbo frames on systems with 4K pages we have to use 581 * an order 1 page, so we might as well increase the size 582 * of our Rx buffer to make better use of the available space 583 */ 584 rx_buf_len = IAVF_RXBUFFER_3072; 585 586 /* We use a 1536 buffer size for configurations with 587 * standard Ethernet mtu. On x86 this gives us enough room 588 * for shared info and 192 bytes of padding. 589 */ 590 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 591 (netdev->mtu <= ETH_DATA_LEN)) 592 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 593 } 594 #endif 595 596 for (i = 0; i < adapter->num_active_queues; i++) { 597 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 598 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 599 600 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 601 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 602 else 603 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 604 } 605 } 606 607 /** 608 * iavf_find_vlan - Search filter list for specific vlan filter 609 * @adapter: board private structure 610 * @vlan: vlan tag 611 * 612 * Returns ptr to the filter object or NULL. Must be called while holding the 613 * mac_vlan_list_lock. 614 **/ 615 static struct 616 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 617 { 618 struct iavf_vlan_filter *f; 619 620 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 621 if (vlan == f->vlan) 622 return f; 623 } 624 return NULL; 625 } 626 627 /** 628 * iavf_add_vlan - Add a vlan filter to the list 629 * @adapter: board private structure 630 * @vlan: VLAN tag 631 * 632 * Returns ptr to the filter object or NULL when no memory available. 633 **/ 634 static struct 635 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 636 { 637 struct iavf_vlan_filter *f = NULL; 638 639 spin_lock_bh(&adapter->mac_vlan_list_lock); 640 641 f = iavf_find_vlan(adapter, vlan); 642 if (!f) { 643 f = kzalloc(sizeof(*f), GFP_ATOMIC); 644 if (!f) 645 goto clearout; 646 647 f->vlan = vlan; 648 649 list_add_tail(&f->list, &adapter->vlan_filter_list); 650 f->add = true; 651 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 652 } 653 654 clearout: 655 spin_unlock_bh(&adapter->mac_vlan_list_lock); 656 return f; 657 } 658 659 /** 660 * iavf_del_vlan - Remove a vlan filter from the list 661 * @adapter: board private structure 662 * @vlan: VLAN tag 663 **/ 664 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 665 { 666 struct iavf_vlan_filter *f; 667 668 spin_lock_bh(&adapter->mac_vlan_list_lock); 669 670 f = iavf_find_vlan(adapter, vlan); 671 if (f) { 672 f->remove = true; 673 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 674 } 675 676 spin_unlock_bh(&adapter->mac_vlan_list_lock); 677 } 678 679 /** 680 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 681 * @netdev: network device struct 682 * @proto: unused protocol data 683 * @vid: VLAN tag 684 **/ 685 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 686 __always_unused __be16 proto, u16 vid) 687 { 688 struct iavf_adapter *adapter = netdev_priv(netdev); 689 690 if (!VLAN_ALLOWED(adapter)) 691 return -EIO; 692 if (iavf_add_vlan(adapter, vid) == NULL) 693 return -ENOMEM; 694 return 0; 695 } 696 697 /** 698 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 699 * @netdev: network device struct 700 * @proto: unused protocol data 701 * @vid: VLAN tag 702 **/ 703 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 704 __always_unused __be16 proto, u16 vid) 705 { 706 struct iavf_adapter *adapter = netdev_priv(netdev); 707 708 if (VLAN_ALLOWED(adapter)) { 709 iavf_del_vlan(adapter, vid); 710 return 0; 711 } 712 return -EIO; 713 } 714 715 /** 716 * iavf_find_filter - Search filter list for specific mac filter 717 * @adapter: board private structure 718 * @macaddr: the MAC address 719 * 720 * Returns ptr to the filter object or NULL. Must be called while holding the 721 * mac_vlan_list_lock. 722 **/ 723 static struct 724 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 725 const u8 *macaddr) 726 { 727 struct iavf_mac_filter *f; 728 729 if (!macaddr) 730 return NULL; 731 732 list_for_each_entry(f, &adapter->mac_filter_list, list) { 733 if (ether_addr_equal(macaddr, f->macaddr)) 734 return f; 735 } 736 return NULL; 737 } 738 739 /** 740 * iavf_add_filter - Add a mac filter to the filter list 741 * @adapter: board private structure 742 * @macaddr: the MAC address 743 * 744 * Returns ptr to the filter object or NULL when no memory available. 745 **/ 746 static struct 747 iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 748 const u8 *macaddr) 749 { 750 struct iavf_mac_filter *f; 751 752 if (!macaddr) 753 return NULL; 754 755 f = iavf_find_filter(adapter, macaddr); 756 if (!f) { 757 f = kzalloc(sizeof(*f), GFP_ATOMIC); 758 if (!f) 759 return f; 760 761 ether_addr_copy(f->macaddr, macaddr); 762 763 list_add_tail(&f->list, &adapter->mac_filter_list); 764 f->add = true; 765 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 766 } else { 767 f->remove = false; 768 } 769 770 return f; 771 } 772 773 /** 774 * iavf_set_mac - NDO callback to set port mac address 775 * @netdev: network interface device structure 776 * @p: pointer to an address structure 777 * 778 * Returns 0 on success, negative on failure 779 **/ 780 static int iavf_set_mac(struct net_device *netdev, void *p) 781 { 782 struct iavf_adapter *adapter = netdev_priv(netdev); 783 struct iavf_hw *hw = &adapter->hw; 784 struct iavf_mac_filter *f; 785 struct sockaddr *addr = p; 786 787 if (!is_valid_ether_addr(addr->sa_data)) 788 return -EADDRNOTAVAIL; 789 790 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 791 return 0; 792 793 spin_lock_bh(&adapter->mac_vlan_list_lock); 794 795 f = iavf_find_filter(adapter, hw->mac.addr); 796 if (f) { 797 f->remove = true; 798 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 799 } 800 801 f = iavf_add_filter(adapter, addr->sa_data); 802 803 spin_unlock_bh(&adapter->mac_vlan_list_lock); 804 805 if (f) { 806 ether_addr_copy(hw->mac.addr, addr->sa_data); 807 } 808 809 return (f == NULL) ? -ENOMEM : 0; 810 } 811 812 /** 813 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 814 * @netdev: the netdevice 815 * @addr: address to add 816 * 817 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 818 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 819 */ 820 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 821 { 822 struct iavf_adapter *adapter = netdev_priv(netdev); 823 824 if (iavf_add_filter(adapter, addr)) 825 return 0; 826 else 827 return -ENOMEM; 828 } 829 830 /** 831 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 832 * @netdev: the netdevice 833 * @addr: address to add 834 * 835 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 836 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 837 */ 838 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 839 { 840 struct iavf_adapter *adapter = netdev_priv(netdev); 841 struct iavf_mac_filter *f; 842 843 /* Under some circumstances, we might receive a request to delete 844 * our own device address from our uc list. Because we store the 845 * device address in the VSI's MAC/VLAN filter list, we need to ignore 846 * such requests and not delete our device address from this list. 847 */ 848 if (ether_addr_equal(addr, netdev->dev_addr)) 849 return 0; 850 851 f = iavf_find_filter(adapter, addr); 852 if (f) { 853 f->remove = true; 854 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 855 } 856 return 0; 857 } 858 859 /** 860 * iavf_set_rx_mode - NDO callback to set the netdev filters 861 * @netdev: network interface device structure 862 **/ 863 static void iavf_set_rx_mode(struct net_device *netdev) 864 { 865 struct iavf_adapter *adapter = netdev_priv(netdev); 866 867 spin_lock_bh(&adapter->mac_vlan_list_lock); 868 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 869 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 870 spin_unlock_bh(&adapter->mac_vlan_list_lock); 871 872 if (netdev->flags & IFF_PROMISC && 873 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 874 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 875 else if (!(netdev->flags & IFF_PROMISC) && 876 adapter->flags & IAVF_FLAG_PROMISC_ON) 877 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 878 879 if (netdev->flags & IFF_ALLMULTI && 880 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 881 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 882 else if (!(netdev->flags & IFF_ALLMULTI) && 883 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 884 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 885 } 886 887 /** 888 * iavf_napi_enable_all - enable NAPI on all queue vectors 889 * @adapter: board private structure 890 **/ 891 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 892 { 893 int q_idx; 894 struct iavf_q_vector *q_vector; 895 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 896 897 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 898 struct napi_struct *napi; 899 900 q_vector = &adapter->q_vectors[q_idx]; 901 napi = &q_vector->napi; 902 napi_enable(napi); 903 } 904 } 905 906 /** 907 * iavf_napi_disable_all - disable NAPI on all queue vectors 908 * @adapter: board private structure 909 **/ 910 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 911 { 912 int q_idx; 913 struct iavf_q_vector *q_vector; 914 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 915 916 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 917 q_vector = &adapter->q_vectors[q_idx]; 918 napi_disable(&q_vector->napi); 919 } 920 } 921 922 /** 923 * iavf_configure - set up transmit and receive data structures 924 * @adapter: board private structure 925 **/ 926 static void iavf_configure(struct iavf_adapter *adapter) 927 { 928 struct net_device *netdev = adapter->netdev; 929 int i; 930 931 iavf_set_rx_mode(netdev); 932 933 iavf_configure_tx(adapter); 934 iavf_configure_rx(adapter); 935 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 936 937 for (i = 0; i < adapter->num_active_queues; i++) { 938 struct iavf_ring *ring = &adapter->rx_rings[i]; 939 940 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 941 } 942 } 943 944 /** 945 * iavf_up_complete - Finish the last steps of bringing up a connection 946 * @adapter: board private structure 947 * 948 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 949 **/ 950 static void iavf_up_complete(struct iavf_adapter *adapter) 951 { 952 adapter->state = __IAVF_RUNNING; 953 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 954 955 iavf_napi_enable_all(adapter); 956 957 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 958 if (CLIENT_ENABLED(adapter)) 959 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 960 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 961 } 962 963 /** 964 * iavf_down - Shutdown the connection processing 965 * @adapter: board private structure 966 * 967 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 968 **/ 969 void iavf_down(struct iavf_adapter *adapter) 970 { 971 struct net_device *netdev = adapter->netdev; 972 struct iavf_vlan_filter *vlf; 973 struct iavf_mac_filter *f; 974 struct iavf_cloud_filter *cf; 975 976 if (adapter->state <= __IAVF_DOWN_PENDING) 977 return; 978 979 netif_carrier_off(netdev); 980 netif_tx_disable(netdev); 981 adapter->link_up = false; 982 iavf_napi_disable_all(adapter); 983 iavf_irq_disable(adapter); 984 985 spin_lock_bh(&adapter->mac_vlan_list_lock); 986 987 /* clear the sync flag on all filters */ 988 __dev_uc_unsync(adapter->netdev, NULL); 989 __dev_mc_unsync(adapter->netdev, NULL); 990 991 /* remove all MAC filters */ 992 list_for_each_entry(f, &adapter->mac_filter_list, list) { 993 f->remove = true; 994 } 995 996 /* remove all VLAN filters */ 997 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 998 vlf->remove = true; 999 } 1000 1001 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1002 1003 /* remove all cloud filters */ 1004 spin_lock_bh(&adapter->cloud_filter_list_lock); 1005 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1006 cf->del = true; 1007 } 1008 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1009 1010 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1011 adapter->state != __IAVF_RESETTING) { 1012 /* cancel any current operation */ 1013 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1014 /* Schedule operations to close down the HW. Don't wait 1015 * here for this to complete. The watchdog is still running 1016 * and it will take care of this. 1017 */ 1018 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1019 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1020 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1021 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1022 } 1023 1024 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1025 } 1026 1027 /** 1028 * iavf_acquire_msix_vectors - Setup the MSIX capability 1029 * @adapter: board private structure 1030 * @vectors: number of vectors to request 1031 * 1032 * Work with the OS to set up the MSIX vectors needed. 1033 * 1034 * Returns 0 on success, negative on failure 1035 **/ 1036 static int 1037 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1038 { 1039 int err, vector_threshold; 1040 1041 /* We'll want at least 3 (vector_threshold): 1042 * 0) Other (Admin Queue and link, mostly) 1043 * 1) TxQ[0] Cleanup 1044 * 2) RxQ[0] Cleanup 1045 */ 1046 vector_threshold = MIN_MSIX_COUNT; 1047 1048 /* The more we get, the more we will assign to Tx/Rx Cleanup 1049 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1050 * Right now, we simply care about how many we'll get; we'll 1051 * set them up later while requesting irq's. 1052 */ 1053 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1054 vector_threshold, vectors); 1055 if (err < 0) { 1056 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1057 kfree(adapter->msix_entries); 1058 adapter->msix_entries = NULL; 1059 return err; 1060 } 1061 1062 /* Adjust for only the vectors we'll use, which is minimum 1063 * of max_msix_q_vectors + NONQ_VECS, or the number of 1064 * vectors we were allocated. 1065 */ 1066 adapter->num_msix_vectors = err; 1067 return 0; 1068 } 1069 1070 /** 1071 * iavf_free_queues - Free memory for all rings 1072 * @adapter: board private structure to initialize 1073 * 1074 * Free all of the memory associated with queue pairs. 1075 **/ 1076 static void iavf_free_queues(struct iavf_adapter *adapter) 1077 { 1078 if (!adapter->vsi_res) 1079 return; 1080 adapter->num_active_queues = 0; 1081 kfree(adapter->tx_rings); 1082 adapter->tx_rings = NULL; 1083 kfree(adapter->rx_rings); 1084 adapter->rx_rings = NULL; 1085 } 1086 1087 /** 1088 * iavf_alloc_queues - Allocate memory for all rings 1089 * @adapter: board private structure to initialize 1090 * 1091 * We allocate one ring per queue at run-time since we don't know the 1092 * number of queues at compile-time. The polling_netdev array is 1093 * intended for Multiqueue, but should work fine with a single queue. 1094 **/ 1095 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1096 { 1097 int i, num_active_queues; 1098 1099 /* If we're in reset reallocating queues we don't actually know yet for 1100 * certain the PF gave us the number of queues we asked for but we'll 1101 * assume it did. Once basic reset is finished we'll confirm once we 1102 * start negotiating config with PF. 1103 */ 1104 if (adapter->num_req_queues) 1105 num_active_queues = adapter->num_req_queues; 1106 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1107 adapter->num_tc) 1108 num_active_queues = adapter->ch_config.total_qps; 1109 else 1110 num_active_queues = min_t(int, 1111 adapter->vsi_res->num_queue_pairs, 1112 (int)(num_online_cpus())); 1113 1114 1115 adapter->tx_rings = kcalloc(num_active_queues, 1116 sizeof(struct iavf_ring), GFP_KERNEL); 1117 if (!adapter->tx_rings) 1118 goto err_out; 1119 adapter->rx_rings = kcalloc(num_active_queues, 1120 sizeof(struct iavf_ring), GFP_KERNEL); 1121 if (!adapter->rx_rings) 1122 goto err_out; 1123 1124 for (i = 0; i < num_active_queues; i++) { 1125 struct iavf_ring *tx_ring; 1126 struct iavf_ring *rx_ring; 1127 1128 tx_ring = &adapter->tx_rings[i]; 1129 1130 tx_ring->queue_index = i; 1131 tx_ring->netdev = adapter->netdev; 1132 tx_ring->dev = &adapter->pdev->dev; 1133 tx_ring->count = adapter->tx_desc_count; 1134 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1135 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1136 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1137 1138 rx_ring = &adapter->rx_rings[i]; 1139 rx_ring->queue_index = i; 1140 rx_ring->netdev = adapter->netdev; 1141 rx_ring->dev = &adapter->pdev->dev; 1142 rx_ring->count = adapter->rx_desc_count; 1143 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1144 } 1145 1146 adapter->num_active_queues = num_active_queues; 1147 1148 return 0; 1149 1150 err_out: 1151 iavf_free_queues(adapter); 1152 return -ENOMEM; 1153 } 1154 1155 /** 1156 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1157 * @adapter: board private structure to initialize 1158 * 1159 * Attempt to configure the interrupts using the best available 1160 * capabilities of the hardware and the kernel. 1161 **/ 1162 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1163 { 1164 int vector, v_budget; 1165 int pairs = 0; 1166 int err = 0; 1167 1168 if (!adapter->vsi_res) { 1169 err = -EIO; 1170 goto out; 1171 } 1172 pairs = adapter->num_active_queues; 1173 1174 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1175 * us much good if we have more vectors than CPUs. However, we already 1176 * limit the total number of queues by the number of CPUs so we do not 1177 * need any further limiting here. 1178 */ 1179 v_budget = min_t(int, pairs + NONQ_VECS, 1180 (int)adapter->vf_res->max_vectors); 1181 1182 adapter->msix_entries = kcalloc(v_budget, 1183 sizeof(struct msix_entry), GFP_KERNEL); 1184 if (!adapter->msix_entries) { 1185 err = -ENOMEM; 1186 goto out; 1187 } 1188 1189 for (vector = 0; vector < v_budget; vector++) 1190 adapter->msix_entries[vector].entry = vector; 1191 1192 err = iavf_acquire_msix_vectors(adapter, v_budget); 1193 1194 out: 1195 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1196 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1197 return err; 1198 } 1199 1200 /** 1201 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1202 * @adapter: board private structure 1203 * 1204 * Return 0 on success, negative on failure 1205 **/ 1206 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1207 { 1208 struct iavf_aqc_get_set_rss_key_data *rss_key = 1209 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1210 struct iavf_hw *hw = &adapter->hw; 1211 int ret = 0; 1212 1213 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1214 /* bail because we already have a command pending */ 1215 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1216 adapter->current_op); 1217 return -EBUSY; 1218 } 1219 1220 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1221 if (ret) { 1222 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1223 iavf_stat_str(hw, ret), 1224 iavf_aq_str(hw, hw->aq.asq_last_status)); 1225 return ret; 1226 1227 } 1228 1229 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1230 adapter->rss_lut, adapter->rss_lut_size); 1231 if (ret) { 1232 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1233 iavf_stat_str(hw, ret), 1234 iavf_aq_str(hw, hw->aq.asq_last_status)); 1235 } 1236 1237 return ret; 1238 1239 } 1240 1241 /** 1242 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1243 * @adapter: board private structure 1244 * 1245 * Returns 0 on success, negative on failure 1246 **/ 1247 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1248 { 1249 struct iavf_hw *hw = &adapter->hw; 1250 u32 *dw; 1251 u16 i; 1252 1253 dw = (u32 *)adapter->rss_key; 1254 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1255 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1256 1257 dw = (u32 *)adapter->rss_lut; 1258 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1259 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1260 1261 iavf_flush(hw); 1262 1263 return 0; 1264 } 1265 1266 /** 1267 * iavf_config_rss - Configure RSS keys and lut 1268 * @adapter: board private structure 1269 * 1270 * Returns 0 on success, negative on failure 1271 **/ 1272 int iavf_config_rss(struct iavf_adapter *adapter) 1273 { 1274 1275 if (RSS_PF(adapter)) { 1276 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1277 IAVF_FLAG_AQ_SET_RSS_KEY; 1278 return 0; 1279 } else if (RSS_AQ(adapter)) { 1280 return iavf_config_rss_aq(adapter); 1281 } else { 1282 return iavf_config_rss_reg(adapter); 1283 } 1284 } 1285 1286 /** 1287 * iavf_fill_rss_lut - Fill the lut with default values 1288 * @adapter: board private structure 1289 **/ 1290 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1291 { 1292 u16 i; 1293 1294 for (i = 0; i < adapter->rss_lut_size; i++) 1295 adapter->rss_lut[i] = i % adapter->num_active_queues; 1296 } 1297 1298 /** 1299 * iavf_init_rss - Prepare for RSS 1300 * @adapter: board private structure 1301 * 1302 * Return 0 on success, negative on failure 1303 **/ 1304 static int iavf_init_rss(struct iavf_adapter *adapter) 1305 { 1306 struct iavf_hw *hw = &adapter->hw; 1307 int ret; 1308 1309 if (!RSS_PF(adapter)) { 1310 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1311 if (adapter->vf_res->vf_cap_flags & 1312 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1313 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1314 else 1315 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1316 1317 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1318 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1319 } 1320 1321 iavf_fill_rss_lut(adapter); 1322 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1323 ret = iavf_config_rss(adapter); 1324 1325 return ret; 1326 } 1327 1328 /** 1329 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1330 * @adapter: board private structure to initialize 1331 * 1332 * We allocate one q_vector per queue interrupt. If allocation fails we 1333 * return -ENOMEM. 1334 **/ 1335 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1336 { 1337 int q_idx = 0, num_q_vectors; 1338 struct iavf_q_vector *q_vector; 1339 1340 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1341 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1342 GFP_KERNEL); 1343 if (!adapter->q_vectors) 1344 return -ENOMEM; 1345 1346 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1347 q_vector = &adapter->q_vectors[q_idx]; 1348 q_vector->adapter = adapter; 1349 q_vector->vsi = &adapter->vsi; 1350 q_vector->v_idx = q_idx; 1351 q_vector->reg_idx = q_idx; 1352 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1353 netif_napi_add(adapter->netdev, &q_vector->napi, 1354 iavf_napi_poll, NAPI_POLL_WEIGHT); 1355 } 1356 1357 return 0; 1358 } 1359 1360 /** 1361 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1362 * @adapter: board private structure to initialize 1363 * 1364 * This function frees the memory allocated to the q_vectors. In addition if 1365 * NAPI is enabled it will delete any references to the NAPI struct prior 1366 * to freeing the q_vector. 1367 **/ 1368 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1369 { 1370 int q_idx, num_q_vectors; 1371 int napi_vectors; 1372 1373 if (!adapter->q_vectors) 1374 return; 1375 1376 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1377 napi_vectors = adapter->num_active_queues; 1378 1379 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1380 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1381 1382 if (q_idx < napi_vectors) 1383 netif_napi_del(&q_vector->napi); 1384 } 1385 kfree(adapter->q_vectors); 1386 adapter->q_vectors = NULL; 1387 } 1388 1389 /** 1390 * iavf_reset_interrupt_capability - Reset MSIX setup 1391 * @adapter: board private structure 1392 * 1393 **/ 1394 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1395 { 1396 if (!adapter->msix_entries) 1397 return; 1398 1399 pci_disable_msix(adapter->pdev); 1400 kfree(adapter->msix_entries); 1401 adapter->msix_entries = NULL; 1402 } 1403 1404 /** 1405 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1406 * @adapter: board private structure to initialize 1407 * 1408 **/ 1409 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1410 { 1411 int err; 1412 1413 err = iavf_alloc_queues(adapter); 1414 if (err) { 1415 dev_err(&adapter->pdev->dev, 1416 "Unable to allocate memory for queues\n"); 1417 goto err_alloc_queues; 1418 } 1419 1420 rtnl_lock(); 1421 err = iavf_set_interrupt_capability(adapter); 1422 rtnl_unlock(); 1423 if (err) { 1424 dev_err(&adapter->pdev->dev, 1425 "Unable to setup interrupt capabilities\n"); 1426 goto err_set_interrupt; 1427 } 1428 1429 err = iavf_alloc_q_vectors(adapter); 1430 if (err) { 1431 dev_err(&adapter->pdev->dev, 1432 "Unable to allocate memory for queue vectors\n"); 1433 goto err_alloc_q_vectors; 1434 } 1435 1436 /* If we've made it so far while ADq flag being ON, then we haven't 1437 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1438 * resources have been allocated in the reset path. 1439 * Now we can truly claim that ADq is enabled. 1440 */ 1441 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1442 adapter->num_tc) 1443 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1444 adapter->num_tc); 1445 1446 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1447 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", 1448 adapter->num_active_queues); 1449 1450 return 0; 1451 err_alloc_q_vectors: 1452 iavf_reset_interrupt_capability(adapter); 1453 err_set_interrupt: 1454 iavf_free_queues(adapter); 1455 err_alloc_queues: 1456 return err; 1457 } 1458 1459 /** 1460 * iavf_free_rss - Free memory used by RSS structs 1461 * @adapter: board private structure 1462 **/ 1463 static void iavf_free_rss(struct iavf_adapter *adapter) 1464 { 1465 kfree(adapter->rss_key); 1466 adapter->rss_key = NULL; 1467 1468 kfree(adapter->rss_lut); 1469 adapter->rss_lut = NULL; 1470 } 1471 1472 /** 1473 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1474 * @adapter: board private structure 1475 * 1476 * Returns 0 on success, negative on failure 1477 **/ 1478 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1479 { 1480 struct net_device *netdev = adapter->netdev; 1481 int err; 1482 1483 if (netif_running(netdev)) 1484 iavf_free_traffic_irqs(adapter); 1485 iavf_free_misc_irq(adapter); 1486 iavf_reset_interrupt_capability(adapter); 1487 iavf_free_q_vectors(adapter); 1488 iavf_free_queues(adapter); 1489 1490 err = iavf_init_interrupt_scheme(adapter); 1491 if (err) 1492 goto err; 1493 1494 netif_tx_stop_all_queues(netdev); 1495 1496 err = iavf_request_misc_irq(adapter); 1497 if (err) 1498 goto err; 1499 1500 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1501 1502 iavf_map_rings_to_vectors(adapter); 1503 1504 if (RSS_AQ(adapter)) 1505 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1506 else 1507 err = iavf_init_rss(adapter); 1508 err: 1509 return err; 1510 } 1511 1512 /** 1513 * iavf_process_aq_command - process aq_required flags 1514 * and sends aq command 1515 * @adapter: pointer to iavf adapter structure 1516 * 1517 * Returns 0 on success 1518 * Returns error code if no command was sent 1519 * or error code if the command failed. 1520 **/ 1521 static int iavf_process_aq_command(struct iavf_adapter *adapter) 1522 { 1523 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) 1524 return iavf_send_vf_config_msg(adapter); 1525 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { 1526 iavf_disable_queues(adapter); 1527 return 0; 1528 } 1529 1530 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) { 1531 iavf_map_queues(adapter); 1532 return 0; 1533 } 1534 1535 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) { 1536 iavf_add_ether_addrs(adapter); 1537 return 0; 1538 } 1539 1540 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) { 1541 iavf_add_vlans(adapter); 1542 return 0; 1543 } 1544 1545 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) { 1546 iavf_del_ether_addrs(adapter); 1547 return 0; 1548 } 1549 1550 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) { 1551 iavf_del_vlans(adapter); 1552 return 0; 1553 } 1554 1555 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { 1556 iavf_enable_vlan_stripping(adapter); 1557 return 0; 1558 } 1559 1560 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { 1561 iavf_disable_vlan_stripping(adapter); 1562 return 0; 1563 } 1564 1565 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) { 1566 iavf_configure_queues(adapter); 1567 return 0; 1568 } 1569 1570 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) { 1571 iavf_enable_queues(adapter); 1572 return 0; 1573 } 1574 1575 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { 1576 /* This message goes straight to the firmware, not the 1577 * PF, so we don't have to set current_op as we will 1578 * not get a response through the ARQ. 1579 */ 1580 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; 1581 return 0; 1582 } 1583 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) { 1584 iavf_get_hena(adapter); 1585 return 0; 1586 } 1587 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) { 1588 iavf_set_hena(adapter); 1589 return 0; 1590 } 1591 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) { 1592 iavf_set_rss_key(adapter); 1593 return 0; 1594 } 1595 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) { 1596 iavf_set_rss_lut(adapter); 1597 return 0; 1598 } 1599 1600 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { 1601 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | 1602 FLAG_VF_MULTICAST_PROMISC); 1603 return 0; 1604 } 1605 1606 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { 1607 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); 1608 return 0; 1609 } 1610 1611 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && 1612 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { 1613 iavf_set_promiscuous(adapter, 0); 1614 return 0; 1615 } 1616 1617 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) { 1618 iavf_enable_channels(adapter); 1619 return 0; 1620 } 1621 1622 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) { 1623 iavf_disable_channels(adapter); 1624 return 0; 1625 } 1626 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1627 iavf_add_cloud_filter(adapter); 1628 return 0; 1629 } 1630 1631 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1632 iavf_del_cloud_filter(adapter); 1633 return 0; 1634 } 1635 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1636 iavf_del_cloud_filter(adapter); 1637 return 0; 1638 } 1639 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1640 iavf_add_cloud_filter(adapter); 1641 return 0; 1642 } 1643 return -EAGAIN; 1644 } 1645 1646 /** 1647 * iavf_startup - first step of driver startup 1648 * @adapter: board private structure 1649 * 1650 * Function process __IAVF_STARTUP driver state. 1651 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 1652 * when fails it returns -EAGAIN 1653 **/ 1654 static int iavf_startup(struct iavf_adapter *adapter) 1655 { 1656 struct pci_dev *pdev = adapter->pdev; 1657 struct iavf_hw *hw = &adapter->hw; 1658 int err; 1659 1660 WARN_ON(adapter->state != __IAVF_STARTUP); 1661 1662 /* driver loaded, probe complete */ 1663 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1664 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 1665 err = iavf_set_mac_type(hw); 1666 if (err) { 1667 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err); 1668 goto err; 1669 } 1670 1671 err = iavf_check_reset_complete(hw); 1672 if (err) { 1673 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 1674 err); 1675 goto err; 1676 } 1677 hw->aq.num_arq_entries = IAVF_AQ_LEN; 1678 hw->aq.num_asq_entries = IAVF_AQ_LEN; 1679 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1680 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 1681 1682 err = iavf_init_adminq(hw); 1683 if (err) { 1684 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err); 1685 goto err; 1686 } 1687 err = iavf_send_api_ver(adapter); 1688 if (err) { 1689 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); 1690 iavf_shutdown_adminq(hw); 1691 goto err; 1692 } 1693 adapter->state = __IAVF_INIT_VERSION_CHECK; 1694 err: 1695 return err; 1696 } 1697 1698 /** 1699 * iavf_init_version_check - second step of driver startup 1700 * @adapter: board private structure 1701 * 1702 * Function process __IAVF_INIT_VERSION_CHECK driver state. 1703 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 1704 * when fails it returns -EAGAIN 1705 **/ 1706 static int iavf_init_version_check(struct iavf_adapter *adapter) 1707 { 1708 struct pci_dev *pdev = adapter->pdev; 1709 struct iavf_hw *hw = &adapter->hw; 1710 int err = -EAGAIN; 1711 1712 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 1713 1714 if (!iavf_asq_done(hw)) { 1715 dev_err(&pdev->dev, "Admin queue command never completed\n"); 1716 iavf_shutdown_adminq(hw); 1717 adapter->state = __IAVF_STARTUP; 1718 goto err; 1719 } 1720 1721 /* aq msg sent, awaiting reply */ 1722 err = iavf_verify_api_ver(adapter); 1723 if (err) { 1724 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) 1725 err = iavf_send_api_ver(adapter); 1726 else 1727 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 1728 adapter->pf_version.major, 1729 adapter->pf_version.minor, 1730 VIRTCHNL_VERSION_MAJOR, 1731 VIRTCHNL_VERSION_MINOR); 1732 goto err; 1733 } 1734 err = iavf_send_vf_config_msg(adapter); 1735 if (err) { 1736 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 1737 err); 1738 goto err; 1739 } 1740 adapter->state = __IAVF_INIT_GET_RESOURCES; 1741 1742 err: 1743 return err; 1744 } 1745 1746 /** 1747 * iavf_init_get_resources - third step of driver startup 1748 * @adapter: board private structure 1749 * 1750 * Function process __IAVF_INIT_GET_RESOURCES driver state and 1751 * finishes driver initialization procedure. 1752 * When success the state is changed to __IAVF_DOWN 1753 * when fails it returns -EAGAIN 1754 **/ 1755 static int iavf_init_get_resources(struct iavf_adapter *adapter) 1756 { 1757 struct net_device *netdev = adapter->netdev; 1758 struct pci_dev *pdev = adapter->pdev; 1759 struct iavf_hw *hw = &adapter->hw; 1760 int err = 0, bufsz; 1761 1762 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1763 /* aq msg sent, awaiting reply */ 1764 if (!adapter->vf_res) { 1765 bufsz = sizeof(struct virtchnl_vf_resource) + 1766 (IAVF_MAX_VF_VSI * 1767 sizeof(struct virtchnl_vsi_resource)); 1768 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); 1769 if (!adapter->vf_res) 1770 goto err; 1771 } 1772 err = iavf_get_vf_config(adapter); 1773 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1774 err = iavf_send_vf_config_msg(adapter); 1775 goto err; 1776 } else if (err == IAVF_ERR_PARAM) { 1777 /* We only get ERR_PARAM if the device is in a very bad 1778 * state or if we've been disabled for previous bad 1779 * behavior. Either way, we're done now. 1780 */ 1781 iavf_shutdown_adminq(hw); 1782 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1783 return 0; 1784 } 1785 if (err) { 1786 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1787 goto err_alloc; 1788 } 1789 1790 if (iavf_process_config(adapter)) 1791 goto err_alloc; 1792 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1793 1794 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1795 1796 netdev->netdev_ops = &iavf_netdev_ops; 1797 iavf_set_ethtool_ops(netdev); 1798 netdev->watchdog_timeo = 5 * HZ; 1799 1800 /* MTU range: 68 - 9710 */ 1801 netdev->min_mtu = ETH_MIN_MTU; 1802 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1803 1804 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1805 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1806 adapter->hw.mac.addr); 1807 eth_hw_addr_random(netdev); 1808 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1809 } else { 1810 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1811 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1812 } 1813 1814 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1815 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1816 err = iavf_init_interrupt_scheme(adapter); 1817 if (err) 1818 goto err_sw_init; 1819 iavf_map_rings_to_vectors(adapter); 1820 if (adapter->vf_res->vf_cap_flags & 1821 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1822 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1823 1824 err = iavf_request_misc_irq(adapter); 1825 if (err) 1826 goto err_sw_init; 1827 1828 netif_carrier_off(netdev); 1829 adapter->link_up = false; 1830 1831 /* set the semaphore to prevent any callbacks after device registration 1832 * up to time when state of driver will be set to __IAVF_DOWN 1833 */ 1834 rtnl_lock(); 1835 if (!adapter->netdev_registered) { 1836 err = register_netdevice(netdev); 1837 if (err) { 1838 rtnl_unlock(); 1839 goto err_register; 1840 } 1841 } 1842 1843 adapter->netdev_registered = true; 1844 1845 netif_tx_stop_all_queues(netdev); 1846 if (CLIENT_ALLOWED(adapter)) { 1847 err = iavf_lan_add_device(adapter); 1848 if (err) { 1849 rtnl_unlock(); 1850 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1851 err); 1852 } 1853 } 1854 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1855 if (netdev->features & NETIF_F_GRO) 1856 dev_info(&pdev->dev, "GRO is enabled\n"); 1857 1858 adapter->state = __IAVF_DOWN; 1859 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1860 rtnl_unlock(); 1861 1862 iavf_misc_irq_enable(adapter); 1863 wake_up(&adapter->down_waitqueue); 1864 1865 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1866 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1867 if (!adapter->rss_key || !adapter->rss_lut) 1868 goto err_mem; 1869 if (RSS_AQ(adapter)) 1870 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1871 else 1872 iavf_init_rss(adapter); 1873 1874 return err; 1875 err_mem: 1876 iavf_free_rss(adapter); 1877 err_register: 1878 iavf_free_misc_irq(adapter); 1879 err_sw_init: 1880 iavf_reset_interrupt_capability(adapter); 1881 err_alloc: 1882 kfree(adapter->vf_res); 1883 adapter->vf_res = NULL; 1884 err: 1885 return err; 1886 } 1887 1888 /** 1889 * iavf_watchdog_task - Periodic call-back task 1890 * @work: pointer to work_struct 1891 **/ 1892 static void iavf_watchdog_task(struct work_struct *work) 1893 { 1894 struct iavf_adapter *adapter = container_of(work, 1895 struct iavf_adapter, 1896 watchdog_task.work); 1897 struct iavf_hw *hw = &adapter->hw; 1898 u32 reg_val; 1899 1900 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1901 goto restart_watchdog; 1902 1903 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1904 adapter->state = __IAVF_COMM_FAILED; 1905 1906 switch (adapter->state) { 1907 case __IAVF_COMM_FAILED: 1908 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1909 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1910 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1911 reg_val == VIRTCHNL_VFR_COMPLETED) { 1912 /* A chance for redemption! */ 1913 dev_err(&adapter->pdev->dev, 1914 "Hardware came out of reset. Attempting reinit.\n"); 1915 adapter->state = __IAVF_STARTUP; 1916 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1917 queue_delayed_work(iavf_wq, &adapter->init_task, 10); 1918 clear_bit(__IAVF_IN_CRITICAL_TASK, 1919 &adapter->crit_section); 1920 /* Don't reschedule the watchdog, since we've restarted 1921 * the init task. When init_task contacts the PF and 1922 * gets everything set up again, it'll restart the 1923 * watchdog for us. Down, boy. Sit. Stay. Woof. 1924 */ 1925 return; 1926 } 1927 adapter->aq_required = 0; 1928 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1929 clear_bit(__IAVF_IN_CRITICAL_TASK, 1930 &adapter->crit_section); 1931 queue_delayed_work(iavf_wq, 1932 &adapter->watchdog_task, 1933 msecs_to_jiffies(10)); 1934 goto watchdog_done; 1935 case __IAVF_RESETTING: 1936 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1937 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1938 return; 1939 case __IAVF_DOWN: 1940 case __IAVF_DOWN_PENDING: 1941 case __IAVF_TESTING: 1942 case __IAVF_RUNNING: 1943 if (adapter->current_op) { 1944 if (!iavf_asq_done(hw)) { 1945 dev_dbg(&adapter->pdev->dev, 1946 "Admin queue timeout\n"); 1947 iavf_send_api_ver(adapter); 1948 } 1949 } else { 1950 if (!iavf_process_aq_command(adapter) && 1951 adapter->state == __IAVF_RUNNING) 1952 iavf_request_stats(adapter); 1953 } 1954 break; 1955 case __IAVF_REMOVE: 1956 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1957 return; 1958 default: 1959 goto restart_watchdog; 1960 } 1961 1962 /* check for hw reset */ 1963 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 1964 if (!reg_val) { 1965 adapter->state = __IAVF_RESETTING; 1966 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1967 adapter->aq_required = 0; 1968 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1969 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1970 queue_work(iavf_wq, &adapter->reset_task); 1971 goto watchdog_done; 1972 } 1973 1974 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 1975 watchdog_done: 1976 if (adapter->state == __IAVF_RUNNING || 1977 adapter->state == __IAVF_COMM_FAILED) 1978 iavf_detect_recover_hung(&adapter->vsi); 1979 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1980 restart_watchdog: 1981 if (adapter->aq_required) 1982 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 1983 msecs_to_jiffies(20)); 1984 else 1985 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1986 queue_work(iavf_wq, &adapter->adminq_task); 1987 } 1988 1989 static void iavf_disable_vf(struct iavf_adapter *adapter) 1990 { 1991 struct iavf_mac_filter *f, *ftmp; 1992 struct iavf_vlan_filter *fv, *fvtmp; 1993 struct iavf_cloud_filter *cf, *cftmp; 1994 1995 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 1996 1997 /* We don't use netif_running() because it may be true prior to 1998 * ndo_open() returning, so we can't assume it means all our open 1999 * tasks have finished, since we're not holding the rtnl_lock here. 2000 */ 2001 if (adapter->state == __IAVF_RUNNING) { 2002 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2003 netif_carrier_off(adapter->netdev); 2004 netif_tx_disable(adapter->netdev); 2005 adapter->link_up = false; 2006 iavf_napi_disable_all(adapter); 2007 iavf_irq_disable(adapter); 2008 iavf_free_traffic_irqs(adapter); 2009 iavf_free_all_tx_resources(adapter); 2010 iavf_free_all_rx_resources(adapter); 2011 } 2012 2013 spin_lock_bh(&adapter->mac_vlan_list_lock); 2014 2015 /* Delete all of the filters */ 2016 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2017 list_del(&f->list); 2018 kfree(f); 2019 } 2020 2021 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2022 list_del(&fv->list); 2023 kfree(fv); 2024 } 2025 2026 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2027 2028 spin_lock_bh(&adapter->cloud_filter_list_lock); 2029 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2030 list_del(&cf->list); 2031 kfree(cf); 2032 adapter->num_cloud_filters--; 2033 } 2034 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2035 2036 iavf_free_misc_irq(adapter); 2037 iavf_reset_interrupt_capability(adapter); 2038 iavf_free_queues(adapter); 2039 iavf_free_q_vectors(adapter); 2040 kfree(adapter->vf_res); 2041 iavf_shutdown_adminq(&adapter->hw); 2042 adapter->netdev->flags &= ~IFF_UP; 2043 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2044 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2045 adapter->state = __IAVF_DOWN; 2046 wake_up(&adapter->down_waitqueue); 2047 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2048 } 2049 2050 #define IAVF_RESET_WAIT_MS 10 2051 #define IAVF_RESET_WAIT_COUNT 500 2052 /** 2053 * iavf_reset_task - Call-back task to handle hardware reset 2054 * @work: pointer to work_struct 2055 * 2056 * During reset we need to shut down and reinitialize the admin queue 2057 * before we can use it to communicate with the PF again. We also clear 2058 * and reinit the rings because that context is lost as well. 2059 **/ 2060 static void iavf_reset_task(struct work_struct *work) 2061 { 2062 struct iavf_adapter *adapter = container_of(work, 2063 struct iavf_adapter, 2064 reset_task); 2065 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2066 struct net_device *netdev = adapter->netdev; 2067 struct iavf_hw *hw = &adapter->hw; 2068 struct iavf_vlan_filter *vlf; 2069 struct iavf_cloud_filter *cf; 2070 struct iavf_mac_filter *f; 2071 u32 reg_val; 2072 int i = 0, err; 2073 bool running; 2074 2075 /* When device is being removed it doesn't make sense to run the reset 2076 * task, just return in such a case. 2077 */ 2078 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2079 return; 2080 2081 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2082 &adapter->crit_section)) 2083 usleep_range(500, 1000); 2084 if (CLIENT_ENABLED(adapter)) { 2085 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2086 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2087 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2088 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2089 cancel_delayed_work_sync(&adapter->client_task); 2090 iavf_notify_client_close(&adapter->vsi, true); 2091 } 2092 iavf_misc_irq_disable(adapter); 2093 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2094 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2095 /* Restart the AQ here. If we have been reset but didn't 2096 * detect it, or if the PF had to reinit, our AQ will be hosed. 2097 */ 2098 iavf_shutdown_adminq(hw); 2099 iavf_init_adminq(hw); 2100 iavf_request_reset(adapter); 2101 } 2102 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2103 2104 /* poll until we see the reset actually happen */ 2105 for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { 2106 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2107 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2108 if (!reg_val) 2109 break; 2110 usleep_range(5000, 10000); 2111 } 2112 if (i == IAVF_RESET_WAIT_COUNT) { 2113 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2114 goto continue_reset; /* act like the reset happened */ 2115 } 2116 2117 /* wait until the reset is complete and the PF is responding to us */ 2118 for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { 2119 /* sleep first to make sure a minimum wait time is met */ 2120 msleep(IAVF_RESET_WAIT_MS); 2121 2122 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2123 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2124 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2125 break; 2126 } 2127 2128 pci_set_master(adapter->pdev); 2129 2130 if (i == IAVF_RESET_WAIT_COUNT) { 2131 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2132 reg_val); 2133 iavf_disable_vf(adapter); 2134 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2135 return; /* Do not attempt to reinit. It's dead, Jim. */ 2136 } 2137 2138 continue_reset: 2139 /* We don't use netif_running() because it may be true prior to 2140 * ndo_open() returning, so we can't assume it means all our open 2141 * tasks have finished, since we're not holding the rtnl_lock here. 2142 */ 2143 running = ((adapter->state == __IAVF_RUNNING) || 2144 (adapter->state == __IAVF_RESETTING)); 2145 2146 if (running) { 2147 netif_carrier_off(netdev); 2148 netif_tx_stop_all_queues(netdev); 2149 adapter->link_up = false; 2150 iavf_napi_disable_all(adapter); 2151 } 2152 iavf_irq_disable(adapter); 2153 2154 adapter->state = __IAVF_RESETTING; 2155 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2156 2157 /* free the Tx/Rx rings and descriptors, might be better to just 2158 * re-use them sometime in the future 2159 */ 2160 iavf_free_all_rx_resources(adapter); 2161 iavf_free_all_tx_resources(adapter); 2162 2163 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2164 /* kill and reinit the admin queue */ 2165 iavf_shutdown_adminq(hw); 2166 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2167 err = iavf_init_adminq(hw); 2168 if (err) 2169 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2170 err); 2171 adapter->aq_required = 0; 2172 2173 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2174 err = iavf_reinit_interrupt_scheme(adapter); 2175 if (err) 2176 goto reset_err; 2177 } 2178 2179 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2180 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2181 2182 spin_lock_bh(&adapter->mac_vlan_list_lock); 2183 2184 /* re-add all MAC filters */ 2185 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2186 f->add = true; 2187 } 2188 /* re-add all VLAN filters */ 2189 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2190 vlf->add = true; 2191 } 2192 2193 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2194 2195 /* check if TCs are running and re-add all cloud filters */ 2196 spin_lock_bh(&adapter->cloud_filter_list_lock); 2197 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2198 adapter->num_tc) { 2199 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2200 cf->add = true; 2201 } 2202 } 2203 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2204 2205 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2206 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2207 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2208 iavf_misc_irq_enable(adapter); 2209 2210 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2211 2212 /* We were running when the reset started, so we need to restore some 2213 * state here. 2214 */ 2215 if (running) { 2216 /* allocate transmit descriptors */ 2217 err = iavf_setup_all_tx_resources(adapter); 2218 if (err) 2219 goto reset_err; 2220 2221 /* allocate receive descriptors */ 2222 err = iavf_setup_all_rx_resources(adapter); 2223 if (err) 2224 goto reset_err; 2225 2226 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2227 err = iavf_request_traffic_irqs(adapter, netdev->name); 2228 if (err) 2229 goto reset_err; 2230 2231 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2232 } 2233 2234 iavf_configure(adapter); 2235 2236 iavf_up_complete(adapter); 2237 2238 iavf_irq_enable(adapter, true); 2239 } else { 2240 adapter->state = __IAVF_DOWN; 2241 wake_up(&adapter->down_waitqueue); 2242 } 2243 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2244 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2245 2246 return; 2247 reset_err: 2248 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2249 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2250 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2251 iavf_close(netdev); 2252 } 2253 2254 /** 2255 * iavf_adminq_task - worker thread to clean the admin queue 2256 * @work: pointer to work_struct containing our data 2257 **/ 2258 static void iavf_adminq_task(struct work_struct *work) 2259 { 2260 struct iavf_adapter *adapter = 2261 container_of(work, struct iavf_adapter, adminq_task); 2262 struct iavf_hw *hw = &adapter->hw; 2263 struct iavf_arq_event_info event; 2264 enum virtchnl_ops v_op; 2265 enum iavf_status ret, v_ret; 2266 u32 val, oldval; 2267 u16 pending; 2268 2269 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2270 goto out; 2271 2272 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2273 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2274 if (!event.msg_buf) 2275 goto out; 2276 2277 do { 2278 ret = iavf_clean_arq_element(hw, &event, &pending); 2279 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2280 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2281 2282 if (ret || !v_op) 2283 break; /* No event to process or error cleaning ARQ */ 2284 2285 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2286 event.msg_len); 2287 if (pending != 0) 2288 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2289 } while (pending); 2290 2291 if ((adapter->flags & 2292 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2293 adapter->state == __IAVF_RESETTING) 2294 goto freedom; 2295 2296 /* check for error indications */ 2297 val = rd32(hw, hw->aq.arq.len); 2298 if (val == 0xdeadbeef) /* indicates device in reset */ 2299 goto freedom; 2300 oldval = val; 2301 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2302 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2303 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2304 } 2305 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2306 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2307 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2308 } 2309 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2310 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2311 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2312 } 2313 if (oldval != val) 2314 wr32(hw, hw->aq.arq.len, val); 2315 2316 val = rd32(hw, hw->aq.asq.len); 2317 oldval = val; 2318 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2319 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2320 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2321 } 2322 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2323 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2324 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2325 } 2326 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2327 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2328 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2329 } 2330 if (oldval != val) 2331 wr32(hw, hw->aq.asq.len, val); 2332 2333 freedom: 2334 kfree(event.msg_buf); 2335 out: 2336 /* re-enable Admin queue interrupt cause */ 2337 iavf_misc_irq_enable(adapter); 2338 } 2339 2340 /** 2341 * iavf_client_task - worker thread to perform client work 2342 * @work: pointer to work_struct containing our data 2343 * 2344 * This task handles client interactions. Because client calls can be 2345 * reentrant, we can't handle them in the watchdog. 2346 **/ 2347 static void iavf_client_task(struct work_struct *work) 2348 { 2349 struct iavf_adapter *adapter = 2350 container_of(work, struct iavf_adapter, client_task.work); 2351 2352 /* If we can't get the client bit, just give up. We'll be rescheduled 2353 * later. 2354 */ 2355 2356 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) 2357 return; 2358 2359 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2360 iavf_client_subtask(adapter); 2361 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2362 goto out; 2363 } 2364 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2365 iavf_notify_client_l2_params(&adapter->vsi); 2366 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2367 goto out; 2368 } 2369 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2370 iavf_notify_client_close(&adapter->vsi, false); 2371 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2372 goto out; 2373 } 2374 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2375 iavf_notify_client_open(&adapter->vsi); 2376 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2377 } 2378 out: 2379 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2380 } 2381 2382 /** 2383 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2384 * @adapter: board private structure 2385 * 2386 * Free all transmit software resources 2387 **/ 2388 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2389 { 2390 int i; 2391 2392 if (!adapter->tx_rings) 2393 return; 2394 2395 for (i = 0; i < adapter->num_active_queues; i++) 2396 if (adapter->tx_rings[i].desc) 2397 iavf_free_tx_resources(&adapter->tx_rings[i]); 2398 } 2399 2400 /** 2401 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2402 * @adapter: board private structure 2403 * 2404 * If this function returns with an error, then it's possible one or 2405 * more of the rings is populated (while the rest are not). It is the 2406 * callers duty to clean those orphaned rings. 2407 * 2408 * Return 0 on success, negative on failure 2409 **/ 2410 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2411 { 2412 int i, err = 0; 2413 2414 for (i = 0; i < adapter->num_active_queues; i++) { 2415 adapter->tx_rings[i].count = adapter->tx_desc_count; 2416 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2417 if (!err) 2418 continue; 2419 dev_err(&adapter->pdev->dev, 2420 "Allocation for Tx Queue %u failed\n", i); 2421 break; 2422 } 2423 2424 return err; 2425 } 2426 2427 /** 2428 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2429 * @adapter: board private structure 2430 * 2431 * If this function returns with an error, then it's possible one or 2432 * more of the rings is populated (while the rest are not). It is the 2433 * callers duty to clean those orphaned rings. 2434 * 2435 * Return 0 on success, negative on failure 2436 **/ 2437 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2438 { 2439 int i, err = 0; 2440 2441 for (i = 0; i < adapter->num_active_queues; i++) { 2442 adapter->rx_rings[i].count = adapter->rx_desc_count; 2443 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2444 if (!err) 2445 continue; 2446 dev_err(&adapter->pdev->dev, 2447 "Allocation for Rx Queue %u failed\n", i); 2448 break; 2449 } 2450 return err; 2451 } 2452 2453 /** 2454 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2455 * @adapter: board private structure 2456 * 2457 * Free all receive software resources 2458 **/ 2459 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2460 { 2461 int i; 2462 2463 if (!adapter->rx_rings) 2464 return; 2465 2466 for (i = 0; i < adapter->num_active_queues; i++) 2467 if (adapter->rx_rings[i].desc) 2468 iavf_free_rx_resources(&adapter->rx_rings[i]); 2469 } 2470 2471 /** 2472 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2473 * @adapter: board private structure 2474 * @max_tx_rate: max Tx bw for a tc 2475 **/ 2476 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2477 u64 max_tx_rate) 2478 { 2479 int speed = 0, ret = 0; 2480 2481 switch (adapter->link_speed) { 2482 case IAVF_LINK_SPEED_40GB: 2483 speed = 40000; 2484 break; 2485 case IAVF_LINK_SPEED_25GB: 2486 speed = 25000; 2487 break; 2488 case IAVF_LINK_SPEED_20GB: 2489 speed = 20000; 2490 break; 2491 case IAVF_LINK_SPEED_10GB: 2492 speed = 10000; 2493 break; 2494 case IAVF_LINK_SPEED_1GB: 2495 speed = 1000; 2496 break; 2497 case IAVF_LINK_SPEED_100MB: 2498 speed = 100; 2499 break; 2500 default: 2501 break; 2502 } 2503 2504 if (max_tx_rate > speed) { 2505 dev_err(&adapter->pdev->dev, 2506 "Invalid tx rate specified\n"); 2507 ret = -EINVAL; 2508 } 2509 2510 return ret; 2511 } 2512 2513 /** 2514 * iavf_validate_channel_config - validate queue mapping info 2515 * @adapter: board private structure 2516 * @mqprio_qopt: queue parameters 2517 * 2518 * This function validates if the config provided by the user to 2519 * configure queue channels is valid or not. Returns 0 on a valid 2520 * config. 2521 **/ 2522 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 2523 struct tc_mqprio_qopt_offload *mqprio_qopt) 2524 { 2525 u64 total_max_rate = 0; 2526 int i, num_qps = 0; 2527 u64 tx_rate = 0; 2528 int ret = 0; 2529 2530 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 2531 mqprio_qopt->qopt.num_tc < 1) 2532 return -EINVAL; 2533 2534 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2535 if (!mqprio_qopt->qopt.count[i] || 2536 mqprio_qopt->qopt.offset[i] != num_qps) 2537 return -EINVAL; 2538 if (mqprio_qopt->min_rate[i]) { 2539 dev_err(&adapter->pdev->dev, 2540 "Invalid min tx rate (greater than 0) specified\n"); 2541 return -EINVAL; 2542 } 2543 /*convert to Mbps */ 2544 tx_rate = div_u64(mqprio_qopt->max_rate[i], 2545 IAVF_MBPS_DIVISOR); 2546 total_max_rate += tx_rate; 2547 num_qps += mqprio_qopt->qopt.count[i]; 2548 } 2549 if (num_qps > IAVF_MAX_REQ_QUEUES) 2550 return -EINVAL; 2551 2552 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 2553 return ret; 2554 } 2555 2556 /** 2557 * iavf_del_all_cloud_filters - delete all cloud filters 2558 * on the traffic classes 2559 **/ 2560 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 2561 { 2562 struct iavf_cloud_filter *cf, *cftmp; 2563 2564 spin_lock_bh(&adapter->cloud_filter_list_lock); 2565 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2566 list) { 2567 list_del(&cf->list); 2568 kfree(cf); 2569 adapter->num_cloud_filters--; 2570 } 2571 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2572 } 2573 2574 /** 2575 * __iavf_setup_tc - configure multiple traffic classes 2576 * @netdev: network interface device structure 2577 * @type_date: tc offload data 2578 * 2579 * This function processes the config information provided by the 2580 * user to configure traffic classes/queue channels and packages the 2581 * information to request the PF to setup traffic classes. 2582 * 2583 * Returns 0 on success. 2584 **/ 2585 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2586 { 2587 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2588 struct iavf_adapter *adapter = netdev_priv(netdev); 2589 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2590 u8 num_tc = 0, total_qps = 0; 2591 int ret = 0, netdev_tc = 0; 2592 u64 max_tx_rate; 2593 u16 mode; 2594 int i; 2595 2596 num_tc = mqprio_qopt->qopt.num_tc; 2597 mode = mqprio_qopt->mode; 2598 2599 /* delete queue_channel */ 2600 if (!mqprio_qopt->qopt.hw) { 2601 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2602 /* reset the tc configuration */ 2603 netdev_reset_tc(netdev); 2604 adapter->num_tc = 0; 2605 netif_tx_stop_all_queues(netdev); 2606 netif_tx_disable(netdev); 2607 iavf_del_all_cloud_filters(adapter); 2608 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2609 goto exit; 2610 } else { 2611 return -EINVAL; 2612 } 2613 } 2614 2615 /* add queue channel */ 2616 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2617 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2618 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2619 return -EOPNOTSUPP; 2620 } 2621 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2622 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2623 return -EINVAL; 2624 } 2625 2626 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2627 if (ret) 2628 return ret; 2629 /* Return if same TC config is requested */ 2630 if (adapter->num_tc == num_tc) 2631 return 0; 2632 adapter->num_tc = num_tc; 2633 2634 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2635 if (i < num_tc) { 2636 adapter->ch_config.ch_info[i].count = 2637 mqprio_qopt->qopt.count[i]; 2638 adapter->ch_config.ch_info[i].offset = 2639 mqprio_qopt->qopt.offset[i]; 2640 total_qps += mqprio_qopt->qopt.count[i]; 2641 max_tx_rate = mqprio_qopt->max_rate[i]; 2642 /* convert to Mbps */ 2643 max_tx_rate = div_u64(max_tx_rate, 2644 IAVF_MBPS_DIVISOR); 2645 adapter->ch_config.ch_info[i].max_tx_rate = 2646 max_tx_rate; 2647 } else { 2648 adapter->ch_config.ch_info[i].count = 1; 2649 adapter->ch_config.ch_info[i].offset = 0; 2650 } 2651 } 2652 adapter->ch_config.total_qps = total_qps; 2653 netif_tx_stop_all_queues(netdev); 2654 netif_tx_disable(netdev); 2655 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2656 netdev_reset_tc(netdev); 2657 /* Report the tc mapping up the stack */ 2658 netdev_set_num_tc(adapter->netdev, num_tc); 2659 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2660 u16 qcount = mqprio_qopt->qopt.count[i]; 2661 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2662 2663 if (i < num_tc) 2664 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2665 qoffset); 2666 } 2667 } 2668 exit: 2669 return ret; 2670 } 2671 2672 /** 2673 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2674 * @adapter: board private structure 2675 * @cls_flower: pointer to struct flow_cls_offload 2676 * @filter: pointer to cloud filter structure 2677 */ 2678 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2679 struct flow_cls_offload *f, 2680 struct iavf_cloud_filter *filter) 2681 { 2682 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2683 struct flow_dissector *dissector = rule->match.dissector; 2684 u16 n_proto_mask = 0; 2685 u16 n_proto_key = 0; 2686 u8 field_flags = 0; 2687 u16 addr_type = 0; 2688 u16 n_proto = 0; 2689 int i = 0; 2690 struct virtchnl_filter *vf = &filter->f; 2691 2692 if (dissector->used_keys & 2693 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2694 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2695 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2696 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2697 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2698 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2699 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2700 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2701 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2702 dissector->used_keys); 2703 return -EOPNOTSUPP; 2704 } 2705 2706 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2707 struct flow_match_enc_keyid match; 2708 2709 flow_rule_match_enc_keyid(rule, &match); 2710 if (match.mask->keyid != 0) 2711 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2712 } 2713 2714 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2715 struct flow_match_basic match; 2716 2717 flow_rule_match_basic(rule, &match); 2718 n_proto_key = ntohs(match.key->n_proto); 2719 n_proto_mask = ntohs(match.mask->n_proto); 2720 2721 if (n_proto_key == ETH_P_ALL) { 2722 n_proto_key = 0; 2723 n_proto_mask = 0; 2724 } 2725 n_proto = n_proto_key & n_proto_mask; 2726 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2727 return -EINVAL; 2728 if (n_proto == ETH_P_IPV6) { 2729 /* specify flow type as TCP IPv6 */ 2730 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2731 } 2732 2733 if (match.key->ip_proto != IPPROTO_TCP) { 2734 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2735 return -EINVAL; 2736 } 2737 } 2738 2739 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2740 struct flow_match_eth_addrs match; 2741 2742 flow_rule_match_eth_addrs(rule, &match); 2743 2744 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2745 if (!is_zero_ether_addr(match.mask->dst)) { 2746 if (is_broadcast_ether_addr(match.mask->dst)) { 2747 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2748 } else { 2749 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2750 match.mask->dst); 2751 return IAVF_ERR_CONFIG; 2752 } 2753 } 2754 2755 if (!is_zero_ether_addr(match.mask->src)) { 2756 if (is_broadcast_ether_addr(match.mask->src)) { 2757 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2758 } else { 2759 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2760 match.mask->src); 2761 return IAVF_ERR_CONFIG; 2762 } 2763 } 2764 2765 if (!is_zero_ether_addr(match.key->dst)) 2766 if (is_valid_ether_addr(match.key->dst) || 2767 is_multicast_ether_addr(match.key->dst)) { 2768 /* set the mask if a valid dst_mac address */ 2769 for (i = 0; i < ETH_ALEN; i++) 2770 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2771 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2772 match.key->dst); 2773 } 2774 2775 if (!is_zero_ether_addr(match.key->src)) 2776 if (is_valid_ether_addr(match.key->src) || 2777 is_multicast_ether_addr(match.key->src)) { 2778 /* set the mask if a valid dst_mac address */ 2779 for (i = 0; i < ETH_ALEN; i++) 2780 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2781 ether_addr_copy(vf->data.tcp_spec.src_mac, 2782 match.key->src); 2783 } 2784 } 2785 2786 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2787 struct flow_match_vlan match; 2788 2789 flow_rule_match_vlan(rule, &match); 2790 if (match.mask->vlan_id) { 2791 if (match.mask->vlan_id == VLAN_VID_MASK) { 2792 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2793 } else { 2794 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2795 match.mask->vlan_id); 2796 return IAVF_ERR_CONFIG; 2797 } 2798 } 2799 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2800 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2801 } 2802 2803 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2804 struct flow_match_control match; 2805 2806 flow_rule_match_control(rule, &match); 2807 addr_type = match.key->addr_type; 2808 } 2809 2810 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2811 struct flow_match_ipv4_addrs match; 2812 2813 flow_rule_match_ipv4_addrs(rule, &match); 2814 if (match.mask->dst) { 2815 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2816 field_flags |= IAVF_CLOUD_FIELD_IIP; 2817 } else { 2818 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2819 be32_to_cpu(match.mask->dst)); 2820 return IAVF_ERR_CONFIG; 2821 } 2822 } 2823 2824 if (match.mask->src) { 2825 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2826 field_flags |= IAVF_CLOUD_FIELD_IIP; 2827 } else { 2828 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2829 be32_to_cpu(match.mask->dst)); 2830 return IAVF_ERR_CONFIG; 2831 } 2832 } 2833 2834 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 2835 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2836 return IAVF_ERR_CONFIG; 2837 } 2838 if (match.key->dst) { 2839 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 2840 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 2841 } 2842 if (match.key->src) { 2843 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 2844 vf->data.tcp_spec.src_ip[0] = match.key->src; 2845 } 2846 } 2847 2848 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2849 struct flow_match_ipv6_addrs match; 2850 2851 flow_rule_match_ipv6_addrs(rule, &match); 2852 2853 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 2854 if (ipv6_addr_any(&match.mask->dst)) { 2855 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 2856 IPV6_ADDR_ANY); 2857 return IAVF_ERR_CONFIG; 2858 } 2859 2860 /* src and dest IPv6 address should not be LOOPBACK 2861 * (0:0:0:0:0:0:0:1) which can be represented as ::1 2862 */ 2863 if (ipv6_addr_loopback(&match.key->dst) || 2864 ipv6_addr_loopback(&match.key->src)) { 2865 dev_err(&adapter->pdev->dev, 2866 "ipv6 addr should not be loopback\n"); 2867 return IAVF_ERR_CONFIG; 2868 } 2869 if (!ipv6_addr_any(&match.mask->dst) || 2870 !ipv6_addr_any(&match.mask->src)) 2871 field_flags |= IAVF_CLOUD_FIELD_IIP; 2872 2873 for (i = 0; i < 4; i++) 2874 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 2875 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 2876 sizeof(vf->data.tcp_spec.dst_ip)); 2877 for (i = 0; i < 4; i++) 2878 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 2879 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 2880 sizeof(vf->data.tcp_spec.src_ip)); 2881 } 2882 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 2883 struct flow_match_ports match; 2884 2885 flow_rule_match_ports(rule, &match); 2886 if (match.mask->src) { 2887 if (match.mask->src == cpu_to_be16(0xffff)) { 2888 field_flags |= IAVF_CLOUD_FIELD_IIP; 2889 } else { 2890 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 2891 be16_to_cpu(match.mask->src)); 2892 return IAVF_ERR_CONFIG; 2893 } 2894 } 2895 2896 if (match.mask->dst) { 2897 if (match.mask->dst == cpu_to_be16(0xffff)) { 2898 field_flags |= IAVF_CLOUD_FIELD_IIP; 2899 } else { 2900 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 2901 be16_to_cpu(match.mask->dst)); 2902 return IAVF_ERR_CONFIG; 2903 } 2904 } 2905 if (match.key->dst) { 2906 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 2907 vf->data.tcp_spec.dst_port = match.key->dst; 2908 } 2909 2910 if (match.key->src) { 2911 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 2912 vf->data.tcp_spec.src_port = match.key->src; 2913 } 2914 } 2915 vf->field_flags = field_flags; 2916 2917 return 0; 2918 } 2919 2920 /** 2921 * iavf_handle_tclass - Forward to a traffic class on the device 2922 * @adapter: board private structure 2923 * @tc: traffic class index on the device 2924 * @filter: pointer to cloud filter structure 2925 */ 2926 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 2927 struct iavf_cloud_filter *filter) 2928 { 2929 if (tc == 0) 2930 return 0; 2931 if (tc < adapter->num_tc) { 2932 if (!filter->f.data.tcp_spec.dst_port) { 2933 dev_err(&adapter->pdev->dev, 2934 "Specify destination port to redirect to traffic class other than TC0\n"); 2935 return -EINVAL; 2936 } 2937 } 2938 /* redirect to a traffic class on the same device */ 2939 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2940 filter->f.action_meta = tc; 2941 return 0; 2942 } 2943 2944 /** 2945 * iavf_configure_clsflower - Add tc flower filters 2946 * @adapter: board private structure 2947 * @cls_flower: Pointer to struct flow_cls_offload 2948 */ 2949 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 2950 struct flow_cls_offload *cls_flower) 2951 { 2952 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 2953 struct iavf_cloud_filter *filter = NULL; 2954 int err = -EINVAL, count = 50; 2955 2956 if (tc < 0) { 2957 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 2958 return -EINVAL; 2959 } 2960 2961 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 2962 if (!filter) 2963 return -ENOMEM; 2964 2965 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 2966 &adapter->crit_section)) { 2967 if (--count == 0) 2968 goto err; 2969 udelay(1); 2970 } 2971 2972 filter->cookie = cls_flower->cookie; 2973 2974 /* set the mask to all zeroes to begin with */ 2975 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 2976 /* start out with flow type and eth type IPv4 to begin with */ 2977 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 2978 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 2979 if (err < 0) 2980 goto err; 2981 2982 err = iavf_handle_tclass(adapter, tc, filter); 2983 if (err < 0) 2984 goto err; 2985 2986 /* add filter to the list */ 2987 spin_lock_bh(&adapter->cloud_filter_list_lock); 2988 list_add_tail(&filter->list, &adapter->cloud_filter_list); 2989 adapter->num_cloud_filters++; 2990 filter->add = true; 2991 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2992 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2993 err: 2994 if (err) 2995 kfree(filter); 2996 2997 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2998 return err; 2999 } 3000 3001 /* iavf_find_cf - Find the cloud filter in the list 3002 * @adapter: Board private structure 3003 * @cookie: filter specific cookie 3004 * 3005 * Returns ptr to the filter object or NULL. Must be called while holding the 3006 * cloud_filter_list_lock. 3007 */ 3008 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3009 unsigned long *cookie) 3010 { 3011 struct iavf_cloud_filter *filter = NULL; 3012 3013 if (!cookie) 3014 return NULL; 3015 3016 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3017 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3018 return filter; 3019 } 3020 return NULL; 3021 } 3022 3023 /** 3024 * iavf_delete_clsflower - Remove tc flower filters 3025 * @adapter: board private structure 3026 * @cls_flower: Pointer to struct flow_cls_offload 3027 */ 3028 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 3029 struct flow_cls_offload *cls_flower) 3030 { 3031 struct iavf_cloud_filter *filter = NULL; 3032 int err = 0; 3033 3034 spin_lock_bh(&adapter->cloud_filter_list_lock); 3035 filter = iavf_find_cf(adapter, &cls_flower->cookie); 3036 if (filter) { 3037 filter->del = true; 3038 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 3039 } else { 3040 err = -EINVAL; 3041 } 3042 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3043 3044 return err; 3045 } 3046 3047 /** 3048 * iavf_setup_tc_cls_flower - flower classifier offloads 3049 * @netdev: net device to configure 3050 * @type_data: offload data 3051 */ 3052 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 3053 struct flow_cls_offload *cls_flower) 3054 { 3055 if (cls_flower->common.chain_index) 3056 return -EOPNOTSUPP; 3057 3058 switch (cls_flower->command) { 3059 case FLOW_CLS_REPLACE: 3060 return iavf_configure_clsflower(adapter, cls_flower); 3061 case FLOW_CLS_DESTROY: 3062 return iavf_delete_clsflower(adapter, cls_flower); 3063 case FLOW_CLS_STATS: 3064 return -EOPNOTSUPP; 3065 default: 3066 return -EOPNOTSUPP; 3067 } 3068 } 3069 3070 /** 3071 * iavf_setup_tc_block_cb - block callback for tc 3072 * @type: type of offload 3073 * @type_data: offload data 3074 * @cb_priv: 3075 * 3076 * This function is the block callback for traffic classes 3077 **/ 3078 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3079 void *cb_priv) 3080 { 3081 switch (type) { 3082 case TC_SETUP_CLSFLOWER: 3083 return iavf_setup_tc_cls_flower(cb_priv, type_data); 3084 default: 3085 return -EOPNOTSUPP; 3086 } 3087 } 3088 3089 static LIST_HEAD(iavf_block_cb_list); 3090 3091 /** 3092 * iavf_setup_tc - configure multiple traffic classes 3093 * @netdev: network interface device structure 3094 * @type: type of offload 3095 * @type_date: tc offload data 3096 * 3097 * This function is the callback to ndo_setup_tc in the 3098 * netdev_ops. 3099 * 3100 * Returns 0 on success 3101 **/ 3102 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 3103 void *type_data) 3104 { 3105 struct iavf_adapter *adapter = netdev_priv(netdev); 3106 3107 switch (type) { 3108 case TC_SETUP_QDISC_MQPRIO: 3109 return __iavf_setup_tc(netdev, type_data); 3110 case TC_SETUP_BLOCK: 3111 return flow_block_cb_setup_simple(type_data, 3112 &iavf_block_cb_list, 3113 iavf_setup_tc_block_cb, 3114 adapter, adapter, true); 3115 default: 3116 return -EOPNOTSUPP; 3117 } 3118 } 3119 3120 /** 3121 * iavf_open - Called when a network interface is made active 3122 * @netdev: network interface device structure 3123 * 3124 * Returns 0 on success, negative value on failure 3125 * 3126 * The open entry point is called when a network interface is made 3127 * active by the system (IFF_UP). At this point all resources needed 3128 * for transmit and receive operations are allocated, the interrupt 3129 * handler is registered with the OS, the watchdog is started, 3130 * and the stack is notified that the interface is ready. 3131 **/ 3132 static int iavf_open(struct net_device *netdev) 3133 { 3134 struct iavf_adapter *adapter = netdev_priv(netdev); 3135 int err; 3136 3137 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3138 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3139 return -EIO; 3140 } 3141 3142 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3143 &adapter->crit_section)) 3144 usleep_range(500, 1000); 3145 3146 if (adapter->state != __IAVF_DOWN) { 3147 err = -EBUSY; 3148 goto err_unlock; 3149 } 3150 3151 /* allocate transmit descriptors */ 3152 err = iavf_setup_all_tx_resources(adapter); 3153 if (err) 3154 goto err_setup_tx; 3155 3156 /* allocate receive descriptors */ 3157 err = iavf_setup_all_rx_resources(adapter); 3158 if (err) 3159 goto err_setup_rx; 3160 3161 /* clear any pending interrupts, may auto mask */ 3162 err = iavf_request_traffic_irqs(adapter, netdev->name); 3163 if (err) 3164 goto err_req_irq; 3165 3166 spin_lock_bh(&adapter->mac_vlan_list_lock); 3167 3168 iavf_add_filter(adapter, adapter->hw.mac.addr); 3169 3170 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3171 3172 iavf_configure(adapter); 3173 3174 iavf_up_complete(adapter); 3175 3176 iavf_irq_enable(adapter, true); 3177 3178 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3179 3180 return 0; 3181 3182 err_req_irq: 3183 iavf_down(adapter); 3184 iavf_free_traffic_irqs(adapter); 3185 err_setup_rx: 3186 iavf_free_all_rx_resources(adapter); 3187 err_setup_tx: 3188 iavf_free_all_tx_resources(adapter); 3189 err_unlock: 3190 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3191 3192 return err; 3193 } 3194 3195 /** 3196 * iavf_close - Disables a network interface 3197 * @netdev: network interface device structure 3198 * 3199 * Returns 0, this is not allowed to fail 3200 * 3201 * The close entry point is called when an interface is de-activated 3202 * by the OS. The hardware is still under the drivers control, but 3203 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3204 * are freed, along with all transmit and receive resources. 3205 **/ 3206 static int iavf_close(struct net_device *netdev) 3207 { 3208 struct iavf_adapter *adapter = netdev_priv(netdev); 3209 int status; 3210 3211 if (adapter->state <= __IAVF_DOWN_PENDING) 3212 return 0; 3213 3214 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3215 &adapter->crit_section)) 3216 usleep_range(500, 1000); 3217 3218 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3219 if (CLIENT_ENABLED(adapter)) 3220 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3221 3222 iavf_down(adapter); 3223 adapter->state = __IAVF_DOWN_PENDING; 3224 iavf_free_traffic_irqs(adapter); 3225 3226 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3227 3228 /* We explicitly don't free resources here because the hardware is 3229 * still active and can DMA into memory. Resources are cleared in 3230 * iavf_virtchnl_completion() after we get confirmation from the PF 3231 * driver that the rings have been stopped. 3232 * 3233 * Also, we wait for state to transition to __IAVF_DOWN before 3234 * returning. State change occurs in iavf_virtchnl_completion() after 3235 * VF resources are released (which occurs after PF driver processes and 3236 * responds to admin queue commands). 3237 */ 3238 3239 status = wait_event_timeout(adapter->down_waitqueue, 3240 adapter->state == __IAVF_DOWN, 3241 msecs_to_jiffies(500)); 3242 if (!status) 3243 netdev_warn(netdev, "Device resources not yet released\n"); 3244 return 0; 3245 } 3246 3247 /** 3248 * iavf_change_mtu - Change the Maximum Transfer Unit 3249 * @netdev: network interface device structure 3250 * @new_mtu: new value for maximum frame size 3251 * 3252 * Returns 0 on success, negative on failure 3253 **/ 3254 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3255 { 3256 struct iavf_adapter *adapter = netdev_priv(netdev); 3257 3258 netdev->mtu = new_mtu; 3259 if (CLIENT_ENABLED(adapter)) { 3260 iavf_notify_client_l2_params(&adapter->vsi); 3261 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3262 } 3263 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3264 queue_work(iavf_wq, &adapter->reset_task); 3265 3266 return 0; 3267 } 3268 3269 /** 3270 * iavf_set_features - set the netdev feature flags 3271 * @netdev: ptr to the netdev being adjusted 3272 * @features: the feature set that the stack is suggesting 3273 * Note: expects to be called while under rtnl_lock() 3274 **/ 3275 static int iavf_set_features(struct net_device *netdev, 3276 netdev_features_t features) 3277 { 3278 struct iavf_adapter *adapter = netdev_priv(netdev); 3279 3280 /* Don't allow changing VLAN_RX flag when adapter is not capable 3281 * of VLAN offload 3282 */ 3283 if (!VLAN_ALLOWED(adapter)) { 3284 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3285 return -EINVAL; 3286 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3287 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3288 adapter->aq_required |= 3289 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3290 else 3291 adapter->aq_required |= 3292 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3293 } 3294 3295 return 0; 3296 } 3297 3298 /** 3299 * iavf_features_check - Validate encapsulated packet conforms to limits 3300 * @skb: skb buff 3301 * @dev: This physical port's netdev 3302 * @features: Offload features that the stack believes apply 3303 **/ 3304 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3305 struct net_device *dev, 3306 netdev_features_t features) 3307 { 3308 size_t len; 3309 3310 /* No point in doing any of this if neither checksum nor GSO are 3311 * being requested for this frame. We can rule out both by just 3312 * checking for CHECKSUM_PARTIAL 3313 */ 3314 if (skb->ip_summed != CHECKSUM_PARTIAL) 3315 return features; 3316 3317 /* We cannot support GSO if the MSS is going to be less than 3318 * 64 bytes. If it is then we need to drop support for GSO. 3319 */ 3320 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3321 features &= ~NETIF_F_GSO_MASK; 3322 3323 /* MACLEN can support at most 63 words */ 3324 len = skb_network_header(skb) - skb->data; 3325 if (len & ~(63 * 2)) 3326 goto out_err; 3327 3328 /* IPLEN and EIPLEN can support at most 127 dwords */ 3329 len = skb_transport_header(skb) - skb_network_header(skb); 3330 if (len & ~(127 * 4)) 3331 goto out_err; 3332 3333 if (skb->encapsulation) { 3334 /* L4TUNLEN can support 127 words */ 3335 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3336 if (len & ~(127 * 2)) 3337 goto out_err; 3338 3339 /* IPLEN can support at most 127 dwords */ 3340 len = skb_inner_transport_header(skb) - 3341 skb_inner_network_header(skb); 3342 if (len & ~(127 * 4)) 3343 goto out_err; 3344 } 3345 3346 /* No need to validate L4LEN as TCP is the only protocol with a 3347 * a flexible value and we support all possible values supported 3348 * by TCP, which is at most 15 dwords 3349 */ 3350 3351 return features; 3352 out_err: 3353 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3354 } 3355 3356 /** 3357 * iavf_fix_features - fix up the netdev feature bits 3358 * @netdev: our net device 3359 * @features: desired feature bits 3360 * 3361 * Returns fixed-up features bits 3362 **/ 3363 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3364 netdev_features_t features) 3365 { 3366 struct iavf_adapter *adapter = netdev_priv(netdev); 3367 3368 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3369 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3370 NETIF_F_HW_VLAN_CTAG_RX | 3371 NETIF_F_HW_VLAN_CTAG_FILTER); 3372 3373 return features; 3374 } 3375 3376 static const struct net_device_ops iavf_netdev_ops = { 3377 .ndo_open = iavf_open, 3378 .ndo_stop = iavf_close, 3379 .ndo_start_xmit = iavf_xmit_frame, 3380 .ndo_set_rx_mode = iavf_set_rx_mode, 3381 .ndo_validate_addr = eth_validate_addr, 3382 .ndo_set_mac_address = iavf_set_mac, 3383 .ndo_change_mtu = iavf_change_mtu, 3384 .ndo_tx_timeout = iavf_tx_timeout, 3385 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3386 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3387 .ndo_features_check = iavf_features_check, 3388 .ndo_fix_features = iavf_fix_features, 3389 .ndo_set_features = iavf_set_features, 3390 .ndo_setup_tc = iavf_setup_tc, 3391 }; 3392 3393 /** 3394 * iavf_check_reset_complete - check that VF reset is complete 3395 * @hw: pointer to hw struct 3396 * 3397 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3398 **/ 3399 static int iavf_check_reset_complete(struct iavf_hw *hw) 3400 { 3401 u32 rstat; 3402 int i; 3403 3404 for (i = 0; i < 100; i++) { 3405 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3406 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3407 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3408 (rstat == VIRTCHNL_VFR_COMPLETED)) 3409 return 0; 3410 usleep_range(10, 20); 3411 } 3412 return -EBUSY; 3413 } 3414 3415 /** 3416 * iavf_process_config - Process the config information we got from the PF 3417 * @adapter: board private structure 3418 * 3419 * Verify that we have a valid config struct, and set up our netdev features 3420 * and our VSI struct. 3421 **/ 3422 int iavf_process_config(struct iavf_adapter *adapter) 3423 { 3424 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3425 int i, num_req_queues = adapter->num_req_queues; 3426 struct net_device *netdev = adapter->netdev; 3427 struct iavf_vsi *vsi = &adapter->vsi; 3428 netdev_features_t hw_enc_features; 3429 netdev_features_t hw_features; 3430 3431 /* got VF config message back from PF, now we can parse it */ 3432 for (i = 0; i < vfres->num_vsis; i++) { 3433 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3434 adapter->vsi_res = &vfres->vsi_res[i]; 3435 } 3436 if (!adapter->vsi_res) { 3437 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3438 return -ENODEV; 3439 } 3440 3441 if (num_req_queues && 3442 num_req_queues != adapter->vsi_res->num_queue_pairs) { 3443 /* Problem. The PF gave us fewer queues than what we had 3444 * negotiated in our request. Need a reset to see if we can't 3445 * get back to a working state. 3446 */ 3447 dev_err(&adapter->pdev->dev, 3448 "Requested %d queues, but PF only gave us %d.\n", 3449 num_req_queues, 3450 adapter->vsi_res->num_queue_pairs); 3451 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3452 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3453 iavf_schedule_reset(adapter); 3454 return -ENODEV; 3455 } 3456 adapter->num_req_queues = 0; 3457 3458 hw_enc_features = NETIF_F_SG | 3459 NETIF_F_IP_CSUM | 3460 NETIF_F_IPV6_CSUM | 3461 NETIF_F_HIGHDMA | 3462 NETIF_F_SOFT_FEATURES | 3463 NETIF_F_TSO | 3464 NETIF_F_TSO_ECN | 3465 NETIF_F_TSO6 | 3466 NETIF_F_SCTP_CRC | 3467 NETIF_F_RXHASH | 3468 NETIF_F_RXCSUM | 3469 0; 3470 3471 /* advertise to stack only if offloads for encapsulated packets is 3472 * supported 3473 */ 3474 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3475 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3476 NETIF_F_GSO_GRE | 3477 NETIF_F_GSO_GRE_CSUM | 3478 NETIF_F_GSO_IPXIP4 | 3479 NETIF_F_GSO_IPXIP6 | 3480 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3481 NETIF_F_GSO_PARTIAL | 3482 0; 3483 3484 if (!(vfres->vf_cap_flags & 3485 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3486 netdev->gso_partial_features |= 3487 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3488 3489 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3490 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3491 netdev->hw_enc_features |= hw_enc_features; 3492 } 3493 /* record features VLANs can make use of */ 3494 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3495 3496 /* Write features and hw_features separately to avoid polluting 3497 * with, or dropping, features that are set when we registered. 3498 */ 3499 hw_features = hw_enc_features; 3500 3501 /* Enable VLAN features if supported */ 3502 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3503 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3504 NETIF_F_HW_VLAN_CTAG_RX); 3505 /* Enable cloud filter if ADQ is supported */ 3506 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3507 hw_features |= NETIF_F_HW_TC; 3508 3509 netdev->hw_features |= hw_features; 3510 3511 netdev->features |= hw_features; 3512 3513 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3514 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3515 3516 netdev->priv_flags |= IFF_UNICAST_FLT; 3517 3518 /* Do not turn on offloads when they are requested to be turned off. 3519 * TSO needs minimum 576 bytes to work correctly. 3520 */ 3521 if (netdev->wanted_features) { 3522 if (!(netdev->wanted_features & NETIF_F_TSO) || 3523 netdev->mtu < 576) 3524 netdev->features &= ~NETIF_F_TSO; 3525 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3526 netdev->mtu < 576) 3527 netdev->features &= ~NETIF_F_TSO6; 3528 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3529 netdev->features &= ~NETIF_F_TSO_ECN; 3530 if (!(netdev->wanted_features & NETIF_F_GRO)) 3531 netdev->features &= ~NETIF_F_GRO; 3532 if (!(netdev->wanted_features & NETIF_F_GSO)) 3533 netdev->features &= ~NETIF_F_GSO; 3534 } 3535 3536 adapter->vsi.id = adapter->vsi_res->vsi_id; 3537 3538 adapter->vsi.back = adapter; 3539 adapter->vsi.base_vector = 1; 3540 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3541 vsi->netdev = adapter->netdev; 3542 vsi->qs_handle = adapter->vsi_res->qset_handle; 3543 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3544 adapter->rss_key_size = vfres->rss_key_size; 3545 adapter->rss_lut_size = vfres->rss_lut_size; 3546 } else { 3547 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3548 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3549 } 3550 3551 return 0; 3552 } 3553 3554 /** 3555 * iavf_init_task - worker thread to perform delayed initialization 3556 * @work: pointer to work_struct containing our data 3557 * 3558 * This task completes the work that was begun in probe. Due to the nature 3559 * of VF-PF communications, we may need to wait tens of milliseconds to get 3560 * responses back from the PF. Rather than busy-wait in probe and bog down the 3561 * whole system, we'll do it in a task so we can sleep. 3562 * This task only runs during driver init. Once we've established 3563 * communications with the PF driver and set up our netdev, the watchdog 3564 * takes over. 3565 **/ 3566 static void iavf_init_task(struct work_struct *work) 3567 { 3568 struct iavf_adapter *adapter = container_of(work, 3569 struct iavf_adapter, 3570 init_task.work); 3571 struct iavf_hw *hw = &adapter->hw; 3572 3573 switch (adapter->state) { 3574 case __IAVF_STARTUP: 3575 if (iavf_startup(adapter) < 0) 3576 goto init_failed; 3577 break; 3578 case __IAVF_INIT_VERSION_CHECK: 3579 if (iavf_init_version_check(adapter) < 0) 3580 goto init_failed; 3581 break; 3582 case __IAVF_INIT_GET_RESOURCES: 3583 if (iavf_init_get_resources(adapter) < 0) 3584 goto init_failed; 3585 return; 3586 default: 3587 goto init_failed; 3588 } 3589 3590 queue_delayed_work(iavf_wq, &adapter->init_task, 3591 msecs_to_jiffies(30)); 3592 return; 3593 init_failed: 3594 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3595 dev_err(&adapter->pdev->dev, 3596 "Failed to communicate with PF; waiting before retry\n"); 3597 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3598 iavf_shutdown_adminq(hw); 3599 adapter->state = __IAVF_STARTUP; 3600 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3601 return; 3602 } 3603 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3604 } 3605 3606 /** 3607 * iavf_shutdown - Shutdown the device in preparation for a reboot 3608 * @pdev: pci device structure 3609 **/ 3610 static void iavf_shutdown(struct pci_dev *pdev) 3611 { 3612 struct net_device *netdev = pci_get_drvdata(pdev); 3613 struct iavf_adapter *adapter = netdev_priv(netdev); 3614 3615 netif_device_detach(netdev); 3616 3617 if (netif_running(netdev)) 3618 iavf_close(netdev); 3619 3620 /* Prevent the watchdog from running. */ 3621 adapter->state = __IAVF_REMOVE; 3622 adapter->aq_required = 0; 3623 3624 #ifdef CONFIG_PM 3625 pci_save_state(pdev); 3626 3627 #endif 3628 pci_disable_device(pdev); 3629 } 3630 3631 /** 3632 * iavf_probe - Device Initialization Routine 3633 * @pdev: PCI device information struct 3634 * @ent: entry in iavf_pci_tbl 3635 * 3636 * Returns 0 on success, negative on failure 3637 * 3638 * iavf_probe initializes an adapter identified by a pci_dev structure. 3639 * The OS initialization, configuring of the adapter private structure, 3640 * and a hardware reset occur. 3641 **/ 3642 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3643 { 3644 struct net_device *netdev; 3645 struct iavf_adapter *adapter = NULL; 3646 struct iavf_hw *hw = NULL; 3647 int err; 3648 3649 err = pci_enable_device(pdev); 3650 if (err) 3651 return err; 3652 3653 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3654 if (err) { 3655 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3656 if (err) { 3657 dev_err(&pdev->dev, 3658 "DMA configuration failed: 0x%x\n", err); 3659 goto err_dma; 3660 } 3661 } 3662 3663 err = pci_request_regions(pdev, iavf_driver_name); 3664 if (err) { 3665 dev_err(&pdev->dev, 3666 "pci_request_regions failed 0x%x\n", err); 3667 goto err_pci_reg; 3668 } 3669 3670 pci_enable_pcie_error_reporting(pdev); 3671 3672 pci_set_master(pdev); 3673 3674 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3675 IAVF_MAX_REQ_QUEUES); 3676 if (!netdev) { 3677 err = -ENOMEM; 3678 goto err_alloc_etherdev; 3679 } 3680 3681 SET_NETDEV_DEV(netdev, &pdev->dev); 3682 3683 pci_set_drvdata(pdev, netdev); 3684 adapter = netdev_priv(netdev); 3685 3686 adapter->netdev = netdev; 3687 adapter->pdev = pdev; 3688 3689 hw = &adapter->hw; 3690 hw->back = adapter; 3691 3692 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3693 adapter->state = __IAVF_STARTUP; 3694 3695 /* Call save state here because it relies on the adapter struct. */ 3696 pci_save_state(pdev); 3697 3698 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3699 pci_resource_len(pdev, 0)); 3700 if (!hw->hw_addr) { 3701 err = -EIO; 3702 goto err_ioremap; 3703 } 3704 hw->vendor_id = pdev->vendor; 3705 hw->device_id = pdev->device; 3706 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 3707 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3708 hw->subsystem_device_id = pdev->subsystem_device; 3709 hw->bus.device = PCI_SLOT(pdev->devfn); 3710 hw->bus.func = PCI_FUNC(pdev->devfn); 3711 hw->bus.bus_id = pdev->bus->number; 3712 3713 /* set up the locks for the AQ, do this only once in probe 3714 * and destroy them only once in remove 3715 */ 3716 mutex_init(&hw->aq.asq_mutex); 3717 mutex_init(&hw->aq.arq_mutex); 3718 3719 spin_lock_init(&adapter->mac_vlan_list_lock); 3720 spin_lock_init(&adapter->cloud_filter_list_lock); 3721 3722 INIT_LIST_HEAD(&adapter->mac_filter_list); 3723 INIT_LIST_HEAD(&adapter->vlan_filter_list); 3724 INIT_LIST_HEAD(&adapter->cloud_filter_list); 3725 3726 INIT_WORK(&adapter->reset_task, iavf_reset_task); 3727 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 3728 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 3729 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 3730 INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); 3731 queue_delayed_work(iavf_wq, &adapter->init_task, 3732 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 3733 3734 /* Setup the wait queue for indicating transition to down status */ 3735 init_waitqueue_head(&adapter->down_waitqueue); 3736 3737 return 0; 3738 3739 err_ioremap: 3740 free_netdev(netdev); 3741 err_alloc_etherdev: 3742 pci_release_regions(pdev); 3743 err_pci_reg: 3744 err_dma: 3745 pci_disable_device(pdev); 3746 return err; 3747 } 3748 3749 #ifdef CONFIG_PM 3750 /** 3751 * iavf_suspend - Power management suspend routine 3752 * @pdev: PCI device information struct 3753 * @state: unused 3754 * 3755 * Called when the system (VM) is entering sleep/suspend. 3756 **/ 3757 static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) 3758 { 3759 struct net_device *netdev = pci_get_drvdata(pdev); 3760 struct iavf_adapter *adapter = netdev_priv(netdev); 3761 int retval = 0; 3762 3763 netif_device_detach(netdev); 3764 3765 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3766 &adapter->crit_section)) 3767 usleep_range(500, 1000); 3768 3769 if (netif_running(netdev)) { 3770 rtnl_lock(); 3771 iavf_down(adapter); 3772 rtnl_unlock(); 3773 } 3774 iavf_free_misc_irq(adapter); 3775 iavf_reset_interrupt_capability(adapter); 3776 3777 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3778 3779 retval = pci_save_state(pdev); 3780 if (retval) 3781 return retval; 3782 3783 pci_disable_device(pdev); 3784 3785 return 0; 3786 } 3787 3788 /** 3789 * iavf_resume - Power management resume routine 3790 * @pdev: PCI device information struct 3791 * 3792 * Called when the system (VM) is resumed from sleep/suspend. 3793 **/ 3794 static int iavf_resume(struct pci_dev *pdev) 3795 { 3796 struct iavf_adapter *adapter = pci_get_drvdata(pdev); 3797 struct net_device *netdev = adapter->netdev; 3798 u32 err; 3799 3800 pci_set_power_state(pdev, PCI_D0); 3801 pci_restore_state(pdev); 3802 /* pci_restore_state clears dev->state_saved so call 3803 * pci_save_state to restore it. 3804 */ 3805 pci_save_state(pdev); 3806 3807 err = pci_enable_device_mem(pdev); 3808 if (err) { 3809 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n"); 3810 return err; 3811 } 3812 pci_set_master(pdev); 3813 3814 rtnl_lock(); 3815 err = iavf_set_interrupt_capability(adapter); 3816 if (err) { 3817 rtnl_unlock(); 3818 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); 3819 return err; 3820 } 3821 err = iavf_request_misc_irq(adapter); 3822 rtnl_unlock(); 3823 if (err) { 3824 dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); 3825 return err; 3826 } 3827 3828 queue_work(iavf_wq, &adapter->reset_task); 3829 3830 netif_device_attach(netdev); 3831 3832 return err; 3833 } 3834 3835 #endif /* CONFIG_PM */ 3836 /** 3837 * iavf_remove - Device Removal Routine 3838 * @pdev: PCI device information struct 3839 * 3840 * iavf_remove is called by the PCI subsystem to alert the driver 3841 * that it should release a PCI device. The could be caused by a 3842 * Hot-Plug event, or because the driver is going to be removed from 3843 * memory. 3844 **/ 3845 static void iavf_remove(struct pci_dev *pdev) 3846 { 3847 struct net_device *netdev = pci_get_drvdata(pdev); 3848 struct iavf_adapter *adapter = netdev_priv(netdev); 3849 struct iavf_vlan_filter *vlf, *vlftmp; 3850 struct iavf_mac_filter *f, *ftmp; 3851 struct iavf_cloud_filter *cf, *cftmp; 3852 struct iavf_hw *hw = &adapter->hw; 3853 int err; 3854 /* Indicate we are in remove and not to run reset_task */ 3855 set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); 3856 cancel_delayed_work_sync(&adapter->init_task); 3857 cancel_work_sync(&adapter->reset_task); 3858 cancel_delayed_work_sync(&adapter->client_task); 3859 if (adapter->netdev_registered) { 3860 unregister_netdev(netdev); 3861 adapter->netdev_registered = false; 3862 } 3863 if (CLIENT_ALLOWED(adapter)) { 3864 err = iavf_lan_del_device(adapter); 3865 if (err) 3866 dev_warn(&pdev->dev, "Failed to delete client device: %d\n", 3867 err); 3868 } 3869 3870 /* Shut down all the garbage mashers on the detention level */ 3871 adapter->state = __IAVF_REMOVE; 3872 adapter->aq_required = 0; 3873 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 3874 iavf_request_reset(adapter); 3875 msleep(50); 3876 /* If the FW isn't responding, kick it once, but only once. */ 3877 if (!iavf_asq_done(hw)) { 3878 iavf_request_reset(adapter); 3879 msleep(50); 3880 } 3881 iavf_free_all_tx_resources(adapter); 3882 iavf_free_all_rx_resources(adapter); 3883 iavf_misc_irq_disable(adapter); 3884 iavf_free_misc_irq(adapter); 3885 iavf_reset_interrupt_capability(adapter); 3886 iavf_free_q_vectors(adapter); 3887 3888 cancel_delayed_work_sync(&adapter->watchdog_task); 3889 3890 cancel_work_sync(&adapter->adminq_task); 3891 3892 iavf_free_rss(adapter); 3893 3894 if (hw->aq.asq.count) 3895 iavf_shutdown_adminq(hw); 3896 3897 /* destroy the locks only once, here */ 3898 mutex_destroy(&hw->aq.arq_mutex); 3899 mutex_destroy(&hw->aq.asq_mutex); 3900 3901 iounmap(hw->hw_addr); 3902 pci_release_regions(pdev); 3903 iavf_free_all_tx_resources(adapter); 3904 iavf_free_all_rx_resources(adapter); 3905 iavf_free_queues(adapter); 3906 kfree(adapter->vf_res); 3907 spin_lock_bh(&adapter->mac_vlan_list_lock); 3908 /* If we got removed before an up/down sequence, we've got a filter 3909 * hanging out there that we need to get rid of. 3910 */ 3911 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 3912 list_del(&f->list); 3913 kfree(f); 3914 } 3915 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, 3916 list) { 3917 list_del(&vlf->list); 3918 kfree(vlf); 3919 } 3920 3921 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3922 3923 spin_lock_bh(&adapter->cloud_filter_list_lock); 3924 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 3925 list_del(&cf->list); 3926 kfree(cf); 3927 } 3928 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3929 3930 free_netdev(netdev); 3931 3932 pci_disable_pcie_error_reporting(pdev); 3933 3934 pci_disable_device(pdev); 3935 } 3936 3937 static struct pci_driver iavf_driver = { 3938 .name = iavf_driver_name, 3939 .id_table = iavf_pci_tbl, 3940 .probe = iavf_probe, 3941 .remove = iavf_remove, 3942 #ifdef CONFIG_PM 3943 .suspend = iavf_suspend, 3944 .resume = iavf_resume, 3945 #endif 3946 .shutdown = iavf_shutdown, 3947 }; 3948 3949 /** 3950 * iavf_init_module - Driver Registration Routine 3951 * 3952 * iavf_init_module is the first routine called when the driver is 3953 * loaded. All it does is register with the PCI subsystem. 3954 **/ 3955 static int __init iavf_init_module(void) 3956 { 3957 int ret; 3958 3959 pr_info("iavf: %s - version %s\n", iavf_driver_string, 3960 iavf_driver_version); 3961 3962 pr_info("%s\n", iavf_copyright); 3963 3964 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, 3965 iavf_driver_name); 3966 if (!iavf_wq) { 3967 pr_err("%s: Failed to create workqueue\n", iavf_driver_name); 3968 return -ENOMEM; 3969 } 3970 ret = pci_register_driver(&iavf_driver); 3971 return ret; 3972 } 3973 3974 module_init(iavf_init_module); 3975 3976 /** 3977 * iavf_exit_module - Driver Exit Cleanup Routine 3978 * 3979 * iavf_exit_module is called just before the driver is removed 3980 * from memory. 3981 **/ 3982 static void __exit iavf_exit_module(void) 3983 { 3984 pci_unregister_driver(&iavf_driver); 3985 destroy_workqueue(iavf_wq); 3986 } 3987 3988 module_exit(iavf_exit_module); 3989 3990 /* iavf_main.c */ 3991