1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include "iavf.h" 5 #include "iavf_prototype.h" 6 #include "iavf_client.h" 7 /* All iavf tracepoints are defined by the include below, which must 8 * be included exactly once across the whole kernel with 9 * CREATE_TRACE_POINTS defined 10 */ 11 #define CREATE_TRACE_POINTS 12 #include "iavf_trace.h" 13 14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter); 15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter); 16 static int iavf_close(struct net_device *netdev); 17 static int iavf_init_get_resources(struct iavf_adapter *adapter); 18 static int iavf_check_reset_complete(struct iavf_hw *hw); 19 20 char iavf_driver_name[] = "iavf"; 21 static const char iavf_driver_string[] = 22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; 23 24 #define DRV_KERN "-k" 25 26 #define DRV_VERSION_MAJOR 3 27 #define DRV_VERSION_MINOR 2 28 #define DRV_VERSION_BUILD 3 29 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 30 __stringify(DRV_VERSION_MINOR) "." \ 31 __stringify(DRV_VERSION_BUILD) \ 32 DRV_KERN 33 const char iavf_driver_version[] = DRV_VERSION; 34 static const char iavf_copyright[] = 35 "Copyright (c) 2013 - 2018 Intel Corporation."; 36 37 /* iavf_pci_tbl - PCI Device ID Table 38 * 39 * Wildcard entries (PCI_ANY_ID) should come last 40 * Last entry must be all 0s 41 * 42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 43 * Class, Class Mask, private data (not used) } 44 */ 45 static const struct pci_device_id iavf_pci_tbl[] = { 46 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0}, 47 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0}, 48 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0}, 49 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0}, 50 /* required last entry */ 51 {0, } 52 }; 53 54 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); 55 56 MODULE_ALIAS("i40evf"); 57 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 58 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); 59 MODULE_LICENSE("GPL v2"); 60 MODULE_VERSION(DRV_VERSION); 61 62 static const struct net_device_ops iavf_netdev_ops; 63 struct workqueue_struct *iavf_wq; 64 65 /** 66 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 67 * @hw: pointer to the HW structure 68 * @mem: ptr to mem struct to fill out 69 * @size: size of memory requested 70 * @alignment: what to align the allocation to 71 **/ 72 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, 73 struct iavf_dma_mem *mem, 74 u64 size, u32 alignment) 75 { 76 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 77 78 if (!mem) 79 return IAVF_ERR_PARAM; 80 81 mem->size = ALIGN(size, alignment); 82 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, 83 (dma_addr_t *)&mem->pa, GFP_KERNEL); 84 if (mem->va) 85 return 0; 86 else 87 return IAVF_ERR_NO_MEMORY; 88 } 89 90 /** 91 * iavf_free_dma_mem_d - OS specific memory free for shared code 92 * @hw: pointer to the HW structure 93 * @mem: ptr to mem struct to free 94 **/ 95 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, 96 struct iavf_dma_mem *mem) 97 { 98 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back; 99 100 if (!mem || !mem->va) 101 return IAVF_ERR_PARAM; 102 dma_free_coherent(&adapter->pdev->dev, mem->size, 103 mem->va, (dma_addr_t)mem->pa); 104 return 0; 105 } 106 107 /** 108 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code 109 * @hw: pointer to the HW structure 110 * @mem: ptr to mem 
struct to fill out 111 * @size: size of memory requested 112 **/ 113 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw, 114 struct iavf_virt_mem *mem, u32 size) 115 { 116 if (!mem) 117 return IAVF_ERR_PARAM; 118 119 mem->size = size; 120 mem->va = kzalloc(size, GFP_KERNEL); 121 122 if (mem->va) 123 return 0; 124 else 125 return IAVF_ERR_NO_MEMORY; 126 } 127 128 /** 129 * iavf_free_virt_mem_d - OS specific memory free for shared code 130 * @hw: pointer to the HW structure 131 * @mem: ptr to mem struct to free 132 **/ 133 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, 134 struct iavf_virt_mem *mem) 135 { 136 if (!mem) 137 return IAVF_ERR_PARAM; 138 139 /* it's ok to kfree a NULL pointer */ 140 kfree(mem->va); 141 142 return 0; 143 } 144 145 /** 146 * iavf_debug_d - OS dependent version of debug printing 147 * @hw: pointer to the HW structure 148 * @mask: debug level mask 149 * @fmt_str: printf-type format description 150 **/ 151 void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 152 { 153 char buf[512]; 154 va_list argptr; 155 156 if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) 157 return; 158 159 va_start(argptr, fmt_str); 160 vsnprintf(buf, sizeof(buf), fmt_str, argptr); 161 va_end(argptr); 162 163 /* the debug string is already formatted with a newline */ 164 pr_info("%s", buf); 165 } 166 167 /** 168 * iavf_schedule_reset - Set the flags and schedule a reset event 169 * @adapter: board private structure 170 **/ 171 void iavf_schedule_reset(struct iavf_adapter *adapter) 172 { 173 if (!(adapter->flags & 174 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 175 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 176 queue_work(iavf_wq, &adapter->reset_task); 177 } 178 } 179 180 /** 181 * iavf_tx_timeout - Respond to a Tx Hang 182 * @netdev: network interface device structure 183 **/ 184 static void iavf_tx_timeout(struct net_device *netdev) 185 { 186 struct iavf_adapter *adapter = netdev_priv(netdev); 187 188 adapter->tx_timeout_count++; 189 iavf_schedule_reset(adapter); 190 } 191 192 /** 193 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC 194 * @adapter: board private structure 195 **/ 196 static void iavf_misc_irq_disable(struct iavf_adapter *adapter) 197 { 198 struct iavf_hw *hw = &adapter->hw; 199 200 if (!adapter->msix_entries) 201 return; 202 203 wr32(hw, IAVF_VFINT_DYN_CTL01, 0); 204 205 iavf_flush(hw); 206 207 synchronize_irq(adapter->msix_entries[0].vector); 208 } 209 210 /** 211 * iavf_misc_irq_enable - Enable default interrupt generation settings 212 * @adapter: board private structure 213 **/ 214 static void iavf_misc_irq_enable(struct iavf_adapter *adapter) 215 { 216 struct iavf_hw *hw = &adapter->hw; 217 218 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK | 219 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK); 220 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK); 221 222 iavf_flush(hw); 223 } 224 225 /** 226 * iavf_irq_disable - Mask off interrupt generation on the NIC 227 * @adapter: board private structure 228 **/ 229 static void iavf_irq_disable(struct iavf_adapter *adapter) 230 { 231 int i; 232 struct iavf_hw *hw = &adapter->hw; 233 234 if (!adapter->msix_entries) 235 return; 236 237 for (i = 1; i < adapter->num_msix_vectors; i++) { 238 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0); 239 synchronize_irq(adapter->msix_entries[i].vector); 240 } 241 iavf_flush(hw); 242 } 243 244 /** 245 * iavf_irq_enable_queues - Enable interrupt for specified queues 246 * @adapter: board private structure 247 * @mask: bitmap of queues 
to enable 248 **/ 249 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask) 250 { 251 struct iavf_hw *hw = &adapter->hw; 252 int i; 253 254 for (i = 1; i < adapter->num_msix_vectors; i++) { 255 if (mask & BIT(i - 1)) { 256 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 257 IAVF_VFINT_DYN_CTLN1_INTENA_MASK | 258 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK); 259 } 260 } 261 } 262 263 /** 264 * iavf_irq_enable - Enable default interrupt generation settings 265 * @adapter: board private structure 266 * @flush: boolean value whether to run rd32() 267 **/ 268 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush) 269 { 270 struct iavf_hw *hw = &adapter->hw; 271 272 iavf_misc_irq_enable(adapter); 273 iavf_irq_enable_queues(adapter, ~0); 274 275 if (flush) 276 iavf_flush(hw); 277 } 278 279 /** 280 * iavf_msix_aq - Interrupt handler for vector 0 281 * @irq: interrupt number 282 * @data: pointer to netdev 283 **/ 284 static irqreturn_t iavf_msix_aq(int irq, void *data) 285 { 286 struct net_device *netdev = data; 287 struct iavf_adapter *adapter = netdev_priv(netdev); 288 struct iavf_hw *hw = &adapter->hw; 289 290 /* handle non-queue interrupts, these reads clear the registers */ 291 rd32(hw, IAVF_VFINT_ICR01); 292 rd32(hw, IAVF_VFINT_ICR0_ENA1); 293 294 /* schedule work on the private workqueue */ 295 queue_work(iavf_wq, &adapter->adminq_task); 296 297 return IRQ_HANDLED; 298 } 299 300 /** 301 * iavf_msix_clean_rings - MSIX mode Interrupt Handler 302 * @irq: interrupt number 303 * @data: pointer to a q_vector 304 **/ 305 static irqreturn_t iavf_msix_clean_rings(int irq, void *data) 306 { 307 struct iavf_q_vector *q_vector = data; 308 309 if (!q_vector->tx.ring && !q_vector->rx.ring) 310 return IRQ_HANDLED; 311 312 napi_schedule_irqoff(&q_vector->napi); 313 314 return IRQ_HANDLED; 315 } 316 317 /** 318 * iavf_map_vector_to_rxq - associate irqs with rx queues 319 * @adapter: board private structure 320 * @v_idx: interrupt number 321 * @r_idx: queue number 322 **/ 323 static void 324 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) 325 { 326 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 327 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx]; 328 struct iavf_hw *hw = &adapter->hw; 329 330 rx_ring->q_vector = q_vector; 331 rx_ring->next = q_vector->rx.ring; 332 rx_ring->vsi = &adapter->vsi; 333 q_vector->rx.ring = rx_ring; 334 q_vector->rx.count++; 335 q_vector->rx.next_update = jiffies + 1; 336 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 337 q_vector->ring_mask |= BIT(r_idx); 338 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 339 q_vector->rx.current_itr); 340 q_vector->rx.current_itr = q_vector->rx.target_itr; 341 } 342 343 /** 344 * iavf_map_vector_to_txq - associate irqs with tx queues 345 * @adapter: board private structure 346 * @v_idx: interrupt number 347 * @t_idx: queue number 348 **/ 349 static void 350 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) 351 { 352 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx]; 353 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx]; 354 struct iavf_hw *hw = &adapter->hw; 355 356 tx_ring->q_vector = q_vector; 357 tx_ring->next = q_vector->tx.ring; 358 tx_ring->vsi = &adapter->vsi; 359 q_vector->tx.ring = tx_ring; 360 q_vector->tx.count++; 361 q_vector->tx.next_update = jiffies + 1; 362 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 363 q_vector->num_ringpairs++; 364 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 365 
q_vector->tx.target_itr); 366 q_vector->tx.current_itr = q_vector->tx.target_itr; 367 } 368 369 /** 370 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors 371 * @adapter: board private structure to initialize 372 * 373 * This function maps descriptor rings to the queue-specific vectors 374 * we were allotted through the MSI-X enabling code. Ideally, we'd have 375 * one vector per ring/queue, but on a constrained vector budget, we 376 * group the rings as "efficiently" as possible. You would add new 377 * mapping configurations in here. 378 **/ 379 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter) 380 { 381 int rings_remaining = adapter->num_active_queues; 382 int ridx = 0, vidx = 0; 383 int q_vectors; 384 385 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 386 387 for (; ridx < rings_remaining; ridx++) { 388 iavf_map_vector_to_rxq(adapter, vidx, ridx); 389 iavf_map_vector_to_txq(adapter, vidx, ridx); 390 391 /* In the case where we have more queues than vectors, continue 392 * round-robin on vectors until all queues are mapped. 393 */ 394 if (++vidx >= q_vectors) 395 vidx = 0; 396 } 397 398 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 399 } 400 401 /** 402 * iavf_irq_affinity_notify - Callback for affinity changes 403 * @notify: context as to what irq was changed 404 * @mask: the new affinity mask 405 * 406 * This is a callback function used by the irq_set_affinity_notifier function 407 * so that we may register to receive changes to the irq affinity masks. 408 **/ 409 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify, 410 const cpumask_t *mask) 411 { 412 struct iavf_q_vector *q_vector = 413 container_of(notify, struct iavf_q_vector, affinity_notify); 414 415 cpumask_copy(&q_vector->affinity_mask, mask); 416 } 417 418 /** 419 * iavf_irq_affinity_release - Callback for affinity notifier release 420 * @ref: internal core kernel usage 421 * 422 * This is a callback function used by the irq_set_affinity_notifier function 423 * to inform the current notification subscriber that they will no longer 424 * receive notifications. 425 **/ 426 static void iavf_irq_affinity_release(struct kref *ref) {} 427 428 /** 429 * iavf_request_traffic_irqs - Initialize MSI-X interrupts 430 * @adapter: board private structure 431 * @basename: device basename 432 * 433 * Allocates MSI-X vectors for tx and rx handling, and requests 434 * interrupts from the kernel. 
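 * Vector 0 is reserved for the admin queue (see iavf_request_misc_irq),
 * so traffic vectors are taken from msix_entries[] starting at index
 * NONQ_VECS.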
435 **/ 436 static int 437 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) 438 { 439 unsigned int vector, q_vectors; 440 unsigned int rx_int_idx = 0, tx_int_idx = 0; 441 int irq_num, err; 442 int cpu; 443 444 iavf_irq_disable(adapter); 445 /* Decrement for Other and TCP Timer vectors */ 446 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 447 448 for (vector = 0; vector < q_vectors; vector++) { 449 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector]; 450 451 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 452 453 if (q_vector->tx.ring && q_vector->rx.ring) { 454 snprintf(q_vector->name, sizeof(q_vector->name), 455 "iavf-%s-TxRx-%d", basename, rx_int_idx++); 456 tx_int_idx++; 457 } else if (q_vector->rx.ring) { 458 snprintf(q_vector->name, sizeof(q_vector->name), 459 "iavf-%s-rx-%d", basename, rx_int_idx++); 460 } else if (q_vector->tx.ring) { 461 snprintf(q_vector->name, sizeof(q_vector->name), 462 "iavf-%s-tx-%d", basename, tx_int_idx++); 463 } else { 464 /* skip this unused q_vector */ 465 continue; 466 } 467 err = request_irq(irq_num, 468 iavf_msix_clean_rings, 469 0, 470 q_vector->name, 471 q_vector); 472 if (err) { 473 dev_info(&adapter->pdev->dev, 474 "Request_irq failed, error: %d\n", err); 475 goto free_queue_irqs; 476 } 477 /* register for affinity change notifications */ 478 q_vector->affinity_notify.notify = iavf_irq_affinity_notify; 479 q_vector->affinity_notify.release = 480 iavf_irq_affinity_release; 481 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); 482 /* Spread the IRQ affinity hints across online CPUs. Note that 483 * get_cpu_mask returns a mask with a permanent lifetime so 484 * it's safe to use as a hint for irq_set_affinity_hint. 485 */ 486 cpu = cpumask_local_spread(q_vector->v_idx, -1); 487 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); 488 } 489 490 return 0; 491 492 free_queue_irqs: 493 while (vector) { 494 vector--; 495 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 496 irq_set_affinity_notifier(irq_num, NULL); 497 irq_set_affinity_hint(irq_num, NULL); 498 free_irq(irq_num, &adapter->q_vectors[vector]); 499 } 500 return err; 501 } 502 503 /** 504 * iavf_request_misc_irq - Initialize MSI-X interrupts 505 * @adapter: board private structure 506 * 507 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This 508 * vector is only for the admin queue, and stays active even when the netdev 509 * is closed. 510 **/ 511 static int iavf_request_misc_irq(struct iavf_adapter *adapter) 512 { 513 struct net_device *netdev = adapter->netdev; 514 int err; 515 516 snprintf(adapter->misc_vector_name, 517 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx", 518 dev_name(&adapter->pdev->dev)); 519 err = request_irq(adapter->msix_entries[0].vector, 520 &iavf_msix_aq, 0, 521 adapter->misc_vector_name, netdev); 522 if (err) { 523 dev_err(&adapter->pdev->dev, 524 "request_irq for %s failed: %d\n", 525 adapter->misc_vector_name, err); 526 free_irq(adapter->msix_entries[0].vector, netdev); 527 } 528 return err; 529 } 530 531 /** 532 * iavf_free_traffic_irqs - Free MSI-X interrupts 533 * @adapter: board private structure 534 * 535 * Frees all MSI-X vectors other than 0. 
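 * Vector 0 belongs to the admin queue and is released separately by
 * iavf_free_misc_irq.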
536 **/ 537 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter) 538 { 539 int vector, irq_num, q_vectors; 540 541 if (!adapter->msix_entries) 542 return; 543 544 q_vectors = adapter->num_msix_vectors - NONQ_VECS; 545 546 for (vector = 0; vector < q_vectors; vector++) { 547 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; 548 irq_set_affinity_notifier(irq_num, NULL); 549 irq_set_affinity_hint(irq_num, NULL); 550 free_irq(irq_num, &adapter->q_vectors[vector]); 551 } 552 } 553 554 /** 555 * iavf_free_misc_irq - Free MSI-X miscellaneous vector 556 * @adapter: board private structure 557 * 558 * Frees MSI-X vector 0. 559 **/ 560 static void iavf_free_misc_irq(struct iavf_adapter *adapter) 561 { 562 struct net_device *netdev = adapter->netdev; 563 564 if (!adapter->msix_entries) 565 return; 566 567 free_irq(adapter->msix_entries[0].vector, netdev); 568 } 569 570 /** 571 * iavf_configure_tx - Configure Transmit Unit after Reset 572 * @adapter: board private structure 573 * 574 * Configure the Tx unit of the MAC after a reset. 575 **/ 576 static void iavf_configure_tx(struct iavf_adapter *adapter) 577 { 578 struct iavf_hw *hw = &adapter->hw; 579 int i; 580 581 for (i = 0; i < adapter->num_active_queues; i++) 582 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); 583 } 584 585 /** 586 * iavf_configure_rx - Configure Receive Unit after Reset 587 * @adapter: board private structure 588 * 589 * Configure the Rx unit of the MAC after a reset. 590 **/ 591 static void iavf_configure_rx(struct iavf_adapter *adapter) 592 { 593 unsigned int rx_buf_len = IAVF_RXBUFFER_2048; 594 struct iavf_hw *hw = &adapter->hw; 595 int i; 596 597 /* Legacy Rx will always default to a 2048 buffer size. */ 598 #if (PAGE_SIZE < 8192) 599 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { 600 struct net_device *netdev = adapter->netdev; 601 602 /* For jumbo frames on systems with 4K pages we have to use 603 * an order 1 page, so we might as well increase the size 604 * of our Rx buffer to make better use of the available space 605 */ 606 rx_buf_len = IAVF_RXBUFFER_3072; 607 608 /* We use a 1536 buffer size for configurations with 609 * standard Ethernet mtu. On x86 this gives us enough room 610 * for shared info and 192 bytes of padding. 611 */ 612 if (!IAVF_2K_TOO_SMALL_WITH_PADDING && 613 (netdev->mtu <= ETH_DATA_LEN)) 614 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; 615 } 616 #endif 617 618 for (i = 0; i < adapter->num_active_queues; i++) { 619 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); 620 adapter->rx_rings[i].rx_buf_len = rx_buf_len; 621 622 if (adapter->flags & IAVF_FLAG_LEGACY_RX) 623 clear_ring_build_skb_enabled(&adapter->rx_rings[i]); 624 else 625 set_ring_build_skb_enabled(&adapter->rx_rings[i]); 626 } 627 } 628 629 /** 630 * iavf_find_vlan - Search filter list for specific vlan filter 631 * @adapter: board private structure 632 * @vlan: vlan tag 633 * 634 * Returns ptr to the filter object or NULL. Must be called while holding the 635 * mac_vlan_list_lock. 636 **/ 637 static struct 638 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) 639 { 640 struct iavf_vlan_filter *f; 641 642 list_for_each_entry(f, &adapter->vlan_filter_list, list) { 643 if (vlan == f->vlan) 644 return f; 645 } 646 return NULL; 647 } 648 649 /** 650 * iavf_add_vlan - Add a vlan filter to the list 651 * @adapter: board private structure 652 * @vlan: VLAN tag 653 * 654 * Returns ptr to the filter object or NULL when no memory available. 
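 * Takes the mac_vlan_list_lock internally, so the caller must not
 * already hold it.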
655 **/ 656 static struct 657 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) 658 { 659 struct iavf_vlan_filter *f = NULL; 660 661 spin_lock_bh(&adapter->mac_vlan_list_lock); 662 663 f = iavf_find_vlan(adapter, vlan); 664 if (!f) { 665 f = kzalloc(sizeof(*f), GFP_ATOMIC); 666 if (!f) 667 goto clearout; 668 669 f->vlan = vlan; 670 671 list_add_tail(&f->list, &adapter->vlan_filter_list); 672 f->add = true; 673 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; 674 } 675 676 clearout: 677 spin_unlock_bh(&adapter->mac_vlan_list_lock); 678 return f; 679 } 680 681 /** 682 * iavf_del_vlan - Remove a vlan filter from the list 683 * @adapter: board private structure 684 * @vlan: VLAN tag 685 **/ 686 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) 687 { 688 struct iavf_vlan_filter *f; 689 690 spin_lock_bh(&adapter->mac_vlan_list_lock); 691 692 f = iavf_find_vlan(adapter, vlan); 693 if (f) { 694 f->remove = true; 695 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 696 } 697 698 spin_unlock_bh(&adapter->mac_vlan_list_lock); 699 } 700 701 /** 702 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device 703 * @netdev: network device struct 704 * @proto: unused protocol data 705 * @vid: VLAN tag 706 **/ 707 static int iavf_vlan_rx_add_vid(struct net_device *netdev, 708 __always_unused __be16 proto, u16 vid) 709 { 710 struct iavf_adapter *adapter = netdev_priv(netdev); 711 712 if (!VLAN_ALLOWED(adapter)) 713 return -EIO; 714 if (iavf_add_vlan(adapter, vid) == NULL) 715 return -ENOMEM; 716 return 0; 717 } 718 719 /** 720 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device 721 * @netdev: network device struct 722 * @proto: unused protocol data 723 * @vid: VLAN tag 724 **/ 725 static int iavf_vlan_rx_kill_vid(struct net_device *netdev, 726 __always_unused __be16 proto, u16 vid) 727 { 728 struct iavf_adapter *adapter = netdev_priv(netdev); 729 730 if (VLAN_ALLOWED(adapter)) { 731 iavf_del_vlan(adapter, vid); 732 return 0; 733 } 734 return -EIO; 735 } 736 737 /** 738 * iavf_find_filter - Search filter list for specific mac filter 739 * @adapter: board private structure 740 * @macaddr: the MAC address 741 * 742 * Returns ptr to the filter object or NULL. Must be called while holding the 743 * mac_vlan_list_lock. 744 **/ 745 static struct 746 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, 747 const u8 *macaddr) 748 { 749 struct iavf_mac_filter *f; 750 751 if (!macaddr) 752 return NULL; 753 754 list_for_each_entry(f, &adapter->mac_filter_list, list) { 755 if (ether_addr_equal(macaddr, f->macaddr)) 756 return f; 757 } 758 return NULL; 759 } 760 761 /** 762 * iavf_add_filter - Add a mac filter to the filter list 763 * @adapter: board private structure 764 * @macaddr: the MAC address 765 * 766 * Returns ptr to the filter object or NULL when no memory available. 
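 * Unlike iavf_add_vlan, this helper does not take the mac_vlan_list_lock;
 * the caller must already hold it.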
767 **/ 768 static struct 769 iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 770 const u8 *macaddr) 771 { 772 struct iavf_mac_filter *f; 773 774 if (!macaddr) 775 return NULL; 776 777 f = iavf_find_filter(adapter, macaddr); 778 if (!f) { 779 f = kzalloc(sizeof(*f), GFP_ATOMIC); 780 if (!f) 781 return f; 782 783 ether_addr_copy(f->macaddr, macaddr); 784 785 list_add_tail(&f->list, &adapter->mac_filter_list); 786 f->add = true; 787 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 788 } else { 789 f->remove = false; 790 } 791 792 return f; 793 } 794 795 /** 796 * iavf_set_mac - NDO callback to set port mac address 797 * @netdev: network interface device structure 798 * @p: pointer to an address structure 799 * 800 * Returns 0 on success, negative on failure 801 **/ 802 static int iavf_set_mac(struct net_device *netdev, void *p) 803 { 804 struct iavf_adapter *adapter = netdev_priv(netdev); 805 struct iavf_hw *hw = &adapter->hw; 806 struct iavf_mac_filter *f; 807 struct sockaddr *addr = p; 808 809 if (!is_valid_ether_addr(addr->sa_data)) 810 return -EADDRNOTAVAIL; 811 812 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) 813 return 0; 814 815 if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF) 816 return -EPERM; 817 818 spin_lock_bh(&adapter->mac_vlan_list_lock); 819 820 f = iavf_find_filter(adapter, hw->mac.addr); 821 if (f) { 822 f->remove = true; 823 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 824 } 825 826 f = iavf_add_filter(adapter, addr->sa_data); 827 828 spin_unlock_bh(&adapter->mac_vlan_list_lock); 829 830 if (f) { 831 ether_addr_copy(hw->mac.addr, addr->sa_data); 832 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 833 } 834 835 return (f == NULL) ? -ENOMEM : 0; 836 } 837 838 /** 839 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address 840 * @netdev: the netdevice 841 * @addr: address to add 842 * 843 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call 844 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 845 */ 846 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr) 847 { 848 struct iavf_adapter *adapter = netdev_priv(netdev); 849 850 if (iavf_add_filter(adapter, addr)) 851 return 0; 852 else 853 return -ENOMEM; 854 } 855 856 /** 857 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address 858 * @netdev: the netdevice 859 * @addr: address to add 860 * 861 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call 862 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. 863 */ 864 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) 865 { 866 struct iavf_adapter *adapter = netdev_priv(netdev); 867 struct iavf_mac_filter *f; 868 869 /* Under some circumstances, we might receive a request to delete 870 * our own device address from our uc list. Because we store the 871 * device address in the VSI's MAC/VLAN filter list, we need to ignore 872 * such requests and not delete our device address from this list. 
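	 * The primary MAC filter is added and removed through iavf_set_mac
	 * instead.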
873 */ 874 if (ether_addr_equal(addr, netdev->dev_addr)) 875 return 0; 876 877 f = iavf_find_filter(adapter, addr); 878 if (f) { 879 f->remove = true; 880 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; 881 } 882 return 0; 883 } 884 885 /** 886 * iavf_set_rx_mode - NDO callback to set the netdev filters 887 * @netdev: network interface device structure 888 **/ 889 static void iavf_set_rx_mode(struct net_device *netdev) 890 { 891 struct iavf_adapter *adapter = netdev_priv(netdev); 892 893 spin_lock_bh(&adapter->mac_vlan_list_lock); 894 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 895 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); 896 spin_unlock_bh(&adapter->mac_vlan_list_lock); 897 898 if (netdev->flags & IFF_PROMISC && 899 !(adapter->flags & IAVF_FLAG_PROMISC_ON)) 900 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; 901 else if (!(netdev->flags & IFF_PROMISC) && 902 adapter->flags & IAVF_FLAG_PROMISC_ON) 903 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; 904 905 if (netdev->flags & IFF_ALLMULTI && 906 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) 907 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; 908 else if (!(netdev->flags & IFF_ALLMULTI) && 909 adapter->flags & IAVF_FLAG_ALLMULTI_ON) 910 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; 911 } 912 913 /** 914 * iavf_napi_enable_all - enable NAPI on all queue vectors 915 * @adapter: board private structure 916 **/ 917 static void iavf_napi_enable_all(struct iavf_adapter *adapter) 918 { 919 int q_idx; 920 struct iavf_q_vector *q_vector; 921 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 922 923 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 924 struct napi_struct *napi; 925 926 q_vector = &adapter->q_vectors[q_idx]; 927 napi = &q_vector->napi; 928 napi_enable(napi); 929 } 930 } 931 932 /** 933 * iavf_napi_disable_all - disable NAPI on all queue vectors 934 * @adapter: board private structure 935 **/ 936 static void iavf_napi_disable_all(struct iavf_adapter *adapter) 937 { 938 int q_idx; 939 struct iavf_q_vector *q_vector; 940 int q_vectors = adapter->num_msix_vectors - NONQ_VECS; 941 942 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 943 q_vector = &adapter->q_vectors[q_idx]; 944 napi_disable(&q_vector->napi); 945 } 946 } 947 948 /** 949 * iavf_configure - set up transmit and receive data structures 950 * @adapter: board private structure 951 **/ 952 static void iavf_configure(struct iavf_adapter *adapter) 953 { 954 struct net_device *netdev = adapter->netdev; 955 int i; 956 957 iavf_set_rx_mode(netdev); 958 959 iavf_configure_tx(adapter); 960 iavf_configure_rx(adapter); 961 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES; 962 963 for (i = 0; i < adapter->num_active_queues; i++) { 964 struct iavf_ring *ring = &adapter->rx_rings[i]; 965 966 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring)); 967 } 968 } 969 970 /** 971 * iavf_up_complete - Finish the last steps of bringing up a connection 972 * @adapter: board private structure 973 * 974 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 
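 * Queue enabling itself is deferred to the watchdog task via
 * IAVF_FLAG_AQ_ENABLE_QUEUES; this function updates the software state,
 * enables NAPI and kicks the watchdog.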
975 **/ 976 static void iavf_up_complete(struct iavf_adapter *adapter) 977 { 978 adapter->state = __IAVF_RUNNING; 979 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 980 981 iavf_napi_enable_all(adapter); 982 983 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 984 if (CLIENT_ENABLED(adapter)) 985 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 986 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 987 } 988 989 /** 990 * iavf_down - Shutdown the connection processing 991 * @adapter: board private structure 992 * 993 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. 994 **/ 995 void iavf_down(struct iavf_adapter *adapter) 996 { 997 struct net_device *netdev = adapter->netdev; 998 struct iavf_vlan_filter *vlf; 999 struct iavf_mac_filter *f; 1000 struct iavf_cloud_filter *cf; 1001 1002 if (adapter->state <= __IAVF_DOWN_PENDING) 1003 return; 1004 1005 netif_carrier_off(netdev); 1006 netif_tx_disable(netdev); 1007 adapter->link_up = false; 1008 iavf_napi_disable_all(adapter); 1009 iavf_irq_disable(adapter); 1010 1011 spin_lock_bh(&adapter->mac_vlan_list_lock); 1012 1013 /* clear the sync flag on all filters */ 1014 __dev_uc_unsync(adapter->netdev, NULL); 1015 __dev_mc_unsync(adapter->netdev, NULL); 1016 1017 /* remove all MAC filters */ 1018 list_for_each_entry(f, &adapter->mac_filter_list, list) { 1019 f->remove = true; 1020 } 1021 1022 /* remove all VLAN filters */ 1023 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1024 vlf->remove = true; 1025 } 1026 1027 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1028 1029 /* remove all cloud filters */ 1030 spin_lock_bh(&adapter->cloud_filter_list_lock); 1031 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1032 cf->del = true; 1033 } 1034 spin_unlock_bh(&adapter->cloud_filter_list_lock); 1035 1036 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && 1037 adapter->state != __IAVF_RESETTING) { 1038 /* cancel any current operation */ 1039 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1040 /* Schedule operations to close down the HW. Don't wait 1041 * here for this to complete. The watchdog is still running 1042 * and it will take care of this. 1043 */ 1044 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; 1045 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; 1046 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 1047 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1048 } 1049 1050 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1051 } 1052 1053 /** 1054 * iavf_acquire_msix_vectors - Setup the MSIX capability 1055 * @adapter: board private structure 1056 * @vectors: number of vectors to request 1057 * 1058 * Work with the OS to set up the MSIX vectors needed. 1059 * 1060 * Returns 0 on success, negative on failure 1061 **/ 1062 static int 1063 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors) 1064 { 1065 int err, vector_threshold; 1066 1067 /* We'll want at least 3 (vector_threshold): 1068 * 0) Other (Admin Queue and link, mostly) 1069 * 1) TxQ[0] Cleanup 1070 * 2) RxQ[0] Cleanup 1071 */ 1072 vector_threshold = MIN_MSIX_COUNT; 1073 1074 /* The more we get, the more we will assign to Tx/Rx Cleanup 1075 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1076 * Right now, we simply care about how many we'll get; we'll 1077 * set them up later while requesting irq's. 
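	 * pci_enable_msix_range() returns the number of vectors actually
	 * granted on success, which can be anywhere between vector_threshold
	 * and the number requested.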
1078 */ 1079 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1080 vector_threshold, vectors); 1081 if (err < 0) { 1082 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); 1083 kfree(adapter->msix_entries); 1084 adapter->msix_entries = NULL; 1085 return err; 1086 } 1087 1088 /* Adjust for only the vectors we'll use, which is minimum 1089 * of max_msix_q_vectors + NONQ_VECS, or the number of 1090 * vectors we were allocated. 1091 */ 1092 adapter->num_msix_vectors = err; 1093 return 0; 1094 } 1095 1096 /** 1097 * iavf_free_queues - Free memory for all rings 1098 * @adapter: board private structure to initialize 1099 * 1100 * Free all of the memory associated with queue pairs. 1101 **/ 1102 static void iavf_free_queues(struct iavf_adapter *adapter) 1103 { 1104 if (!adapter->vsi_res) 1105 return; 1106 adapter->num_active_queues = 0; 1107 kfree(adapter->tx_rings); 1108 adapter->tx_rings = NULL; 1109 kfree(adapter->rx_rings); 1110 adapter->rx_rings = NULL; 1111 } 1112 1113 /** 1114 * iavf_alloc_queues - Allocate memory for all rings 1115 * @adapter: board private structure to initialize 1116 * 1117 * We allocate one ring per queue at run-time since we don't know the 1118 * number of queues at compile-time. The polling_netdev array is 1119 * intended for Multiqueue, but should work fine with a single queue. 1120 **/ 1121 static int iavf_alloc_queues(struct iavf_adapter *adapter) 1122 { 1123 int i, num_active_queues; 1124 1125 /* If we're in reset reallocating queues we don't actually know yet for 1126 * certain the PF gave us the number of queues we asked for but we'll 1127 * assume it did. Once basic reset is finished we'll confirm once we 1128 * start negotiating config with PF. 1129 */ 1130 if (adapter->num_req_queues) 1131 num_active_queues = adapter->num_req_queues; 1132 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1133 adapter->num_tc) 1134 num_active_queues = adapter->ch_config.total_qps; 1135 else 1136 num_active_queues = min_t(int, 1137 adapter->vsi_res->num_queue_pairs, 1138 (int)(num_online_cpus())); 1139 1140 1141 adapter->tx_rings = kcalloc(num_active_queues, 1142 sizeof(struct iavf_ring), GFP_KERNEL); 1143 if (!adapter->tx_rings) 1144 goto err_out; 1145 adapter->rx_rings = kcalloc(num_active_queues, 1146 sizeof(struct iavf_ring), GFP_KERNEL); 1147 if (!adapter->rx_rings) 1148 goto err_out; 1149 1150 for (i = 0; i < num_active_queues; i++) { 1151 struct iavf_ring *tx_ring; 1152 struct iavf_ring *rx_ring; 1153 1154 tx_ring = &adapter->tx_rings[i]; 1155 1156 tx_ring->queue_index = i; 1157 tx_ring->netdev = adapter->netdev; 1158 tx_ring->dev = &adapter->pdev->dev; 1159 tx_ring->count = adapter->tx_desc_count; 1160 tx_ring->itr_setting = IAVF_ITR_TX_DEF; 1161 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE) 1162 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR; 1163 1164 rx_ring = &adapter->rx_rings[i]; 1165 rx_ring->queue_index = i; 1166 rx_ring->netdev = adapter->netdev; 1167 rx_ring->dev = &adapter->pdev->dev; 1168 rx_ring->count = adapter->rx_desc_count; 1169 rx_ring->itr_setting = IAVF_ITR_RX_DEF; 1170 } 1171 1172 adapter->num_active_queues = num_active_queues; 1173 1174 return 0; 1175 1176 err_out: 1177 iavf_free_queues(adapter); 1178 return -ENOMEM; 1179 } 1180 1181 /** 1182 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported 1183 * @adapter: board private structure to initialize 1184 * 1185 * Attempt to configure the interrupts using the best available 1186 * capabilities of the hardware and the kernel. 
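 * Only MSI-X is supported; there is no fallback to legacy interrupts if
 * vector allocation fails.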
1187 **/ 1188 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter) 1189 { 1190 int vector, v_budget; 1191 int pairs = 0; 1192 int err = 0; 1193 1194 if (!adapter->vsi_res) { 1195 err = -EIO; 1196 goto out; 1197 } 1198 pairs = adapter->num_active_queues; 1199 1200 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do 1201 * us much good if we have more vectors than CPUs. However, we already 1202 * limit the total number of queues by the number of CPUs so we do not 1203 * need any further limiting here. 1204 */ 1205 v_budget = min_t(int, pairs + NONQ_VECS, 1206 (int)adapter->vf_res->max_vectors); 1207 1208 adapter->msix_entries = kcalloc(v_budget, 1209 sizeof(struct msix_entry), GFP_KERNEL); 1210 if (!adapter->msix_entries) { 1211 err = -ENOMEM; 1212 goto out; 1213 } 1214 1215 for (vector = 0; vector < v_budget; vector++) 1216 adapter->msix_entries[vector].entry = vector; 1217 1218 err = iavf_acquire_msix_vectors(adapter, v_budget); 1219 1220 out: 1221 netif_set_real_num_rx_queues(adapter->netdev, pairs); 1222 netif_set_real_num_tx_queues(adapter->netdev, pairs); 1223 return err; 1224 } 1225 1226 /** 1227 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands 1228 * @adapter: board private structure 1229 * 1230 * Return 0 on success, negative on failure 1231 **/ 1232 static int iavf_config_rss_aq(struct iavf_adapter *adapter) 1233 { 1234 struct iavf_aqc_get_set_rss_key_data *rss_key = 1235 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key; 1236 struct iavf_hw *hw = &adapter->hw; 1237 int ret = 0; 1238 1239 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1240 /* bail because we already have a command pending */ 1241 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", 1242 adapter->current_op); 1243 return -EBUSY; 1244 } 1245 1246 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); 1247 if (ret) { 1248 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", 1249 iavf_stat_str(hw, ret), 1250 iavf_aq_str(hw, hw->aq.asq_last_status)); 1251 return ret; 1252 1253 } 1254 1255 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false, 1256 adapter->rss_lut, adapter->rss_lut_size); 1257 if (ret) { 1258 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", 1259 iavf_stat_str(hw, ret), 1260 iavf_aq_str(hw, hw->aq.asq_last_status)); 1261 } 1262 1263 return ret; 1264 1265 } 1266 1267 /** 1268 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers 1269 * @adapter: board private structure 1270 * 1271 * Returns 0 on success, negative on failure 1272 **/ 1273 static int iavf_config_rss_reg(struct iavf_adapter *adapter) 1274 { 1275 struct iavf_hw *hw = &adapter->hw; 1276 u32 *dw; 1277 u16 i; 1278 1279 dw = (u32 *)adapter->rss_key; 1280 for (i = 0; i <= adapter->rss_key_size / 4; i++) 1281 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]); 1282 1283 dw = (u32 *)adapter->rss_lut; 1284 for (i = 0; i <= adapter->rss_lut_size / 4; i++) 1285 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]); 1286 1287 iavf_flush(hw); 1288 1289 return 0; 1290 } 1291 1292 /** 1293 * iavf_config_rss - Configure RSS keys and lut 1294 * @adapter: board private structure 1295 * 1296 * Returns 0 on success, negative on failure 1297 **/ 1298 int iavf_config_rss(struct iavf_adapter *adapter) 1299 { 1300 1301 if (RSS_PF(adapter)) { 1302 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT | 1303 IAVF_FLAG_AQ_SET_RSS_KEY; 1304 return 0; 1305 } else if (RSS_AQ(adapter)) { 1306 return iavf_config_rss_aq(adapter); 1307 } else { 1308 return 
iavf_config_rss_reg(adapter); 1309 } 1310 } 1311 1312 /** 1313 * iavf_fill_rss_lut - Fill the lut with default values 1314 * @adapter: board private structure 1315 **/ 1316 static void iavf_fill_rss_lut(struct iavf_adapter *adapter) 1317 { 1318 u16 i; 1319 1320 for (i = 0; i < adapter->rss_lut_size; i++) 1321 adapter->rss_lut[i] = i % adapter->num_active_queues; 1322 } 1323 1324 /** 1325 * iavf_init_rss - Prepare for RSS 1326 * @adapter: board private structure 1327 * 1328 * Return 0 on success, negative on failure 1329 **/ 1330 static int iavf_init_rss(struct iavf_adapter *adapter) 1331 { 1332 struct iavf_hw *hw = &adapter->hw; 1333 int ret; 1334 1335 if (!RSS_PF(adapter)) { 1336 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ 1337 if (adapter->vf_res->vf_cap_flags & 1338 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 1339 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED; 1340 else 1341 adapter->hena = IAVF_DEFAULT_RSS_HENA; 1342 1343 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena); 1344 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32)); 1345 } 1346 1347 iavf_fill_rss_lut(adapter); 1348 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); 1349 ret = iavf_config_rss(adapter); 1350 1351 return ret; 1352 } 1353 1354 /** 1355 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors 1356 * @adapter: board private structure to initialize 1357 * 1358 * We allocate one q_vector per queue interrupt. If allocation fails we 1359 * return -ENOMEM. 1360 **/ 1361 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) 1362 { 1363 int q_idx = 0, num_q_vectors; 1364 struct iavf_q_vector *q_vector; 1365 1366 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1367 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), 1368 GFP_KERNEL); 1369 if (!adapter->q_vectors) 1370 return -ENOMEM; 1371 1372 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1373 q_vector = &adapter->q_vectors[q_idx]; 1374 q_vector->adapter = adapter; 1375 q_vector->vsi = &adapter->vsi; 1376 q_vector->v_idx = q_idx; 1377 q_vector->reg_idx = q_idx; 1378 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); 1379 netif_napi_add(adapter->netdev, &q_vector->napi, 1380 iavf_napi_poll, NAPI_POLL_WEIGHT); 1381 } 1382 1383 return 0; 1384 } 1385 1386 /** 1387 * iavf_free_q_vectors - Free memory allocated for interrupt vectors 1388 * @adapter: board private structure to initialize 1389 * 1390 * This function frees the memory allocated to the q_vectors. In addition if 1391 * NAPI is enabled it will delete any references to the NAPI struct prior 1392 * to freeing the q_vector. 
1393 **/ 1394 static void iavf_free_q_vectors(struct iavf_adapter *adapter) 1395 { 1396 int q_idx, num_q_vectors; 1397 int napi_vectors; 1398 1399 if (!adapter->q_vectors) 1400 return; 1401 1402 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; 1403 napi_vectors = adapter->num_active_queues; 1404 1405 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1406 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; 1407 1408 if (q_idx < napi_vectors) 1409 netif_napi_del(&q_vector->napi); 1410 } 1411 kfree(adapter->q_vectors); 1412 adapter->q_vectors = NULL; 1413 } 1414 1415 /** 1416 * iavf_reset_interrupt_capability - Reset MSIX setup 1417 * @adapter: board private structure 1418 * 1419 **/ 1420 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter) 1421 { 1422 if (!adapter->msix_entries) 1423 return; 1424 1425 pci_disable_msix(adapter->pdev); 1426 kfree(adapter->msix_entries); 1427 adapter->msix_entries = NULL; 1428 } 1429 1430 /** 1431 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init 1432 * @adapter: board private structure to initialize 1433 * 1434 **/ 1435 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter) 1436 { 1437 int err; 1438 1439 err = iavf_alloc_queues(adapter); 1440 if (err) { 1441 dev_err(&adapter->pdev->dev, 1442 "Unable to allocate memory for queues\n"); 1443 goto err_alloc_queues; 1444 } 1445 1446 rtnl_lock(); 1447 err = iavf_set_interrupt_capability(adapter); 1448 rtnl_unlock(); 1449 if (err) { 1450 dev_err(&adapter->pdev->dev, 1451 "Unable to setup interrupt capabilities\n"); 1452 goto err_set_interrupt; 1453 } 1454 1455 err = iavf_alloc_q_vectors(adapter); 1456 if (err) { 1457 dev_err(&adapter->pdev->dev, 1458 "Unable to allocate memory for queue vectors\n"); 1459 goto err_alloc_q_vectors; 1460 } 1461 1462 /* If we've made it so far while ADq flag being ON, then we haven't 1463 * bailed out anywhere in middle. And ADq isn't just enabled but actual 1464 * resources have been allocated in the reset path. 1465 * Now we can truly claim that ADq is enabled. 1466 */ 1467 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1468 adapter->num_tc) 1469 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1470 adapter->num_tc); 1471 1472 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1473 (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", 1474 adapter->num_active_queues); 1475 1476 return 0; 1477 err_alloc_q_vectors: 1478 iavf_reset_interrupt_capability(adapter); 1479 err_set_interrupt: 1480 iavf_free_queues(adapter); 1481 err_alloc_queues: 1482 return err; 1483 } 1484 1485 /** 1486 * iavf_free_rss - Free memory used by RSS structs 1487 * @adapter: board private structure 1488 **/ 1489 static void iavf_free_rss(struct iavf_adapter *adapter) 1490 { 1491 kfree(adapter->rss_key); 1492 adapter->rss_key = NULL; 1493 1494 kfree(adapter->rss_lut); 1495 adapter->rss_lut = NULL; 1496 } 1497 1498 /** 1499 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors 1500 * @adapter: board private structure 1501 * 1502 * Returns 0 on success, negative on failure 1503 **/ 1504 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) 1505 { 1506 struct net_device *netdev = adapter->netdev; 1507 int err; 1508 1509 if (netif_running(netdev)) 1510 iavf_free_traffic_irqs(adapter); 1511 iavf_free_misc_irq(adapter); 1512 iavf_reset_interrupt_capability(adapter); 1513 iavf_free_q_vectors(adapter); 1514 iavf_free_queues(adapter); 1515 1516 err = iavf_init_interrupt_scheme(adapter); 1517 if (err) 1518 goto err; 1519 1520 netif_tx_stop_all_queues(netdev); 1521 1522 err = iavf_request_misc_irq(adapter); 1523 if (err) 1524 goto err; 1525 1526 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 1527 1528 iavf_map_rings_to_vectors(adapter); 1529 1530 if (RSS_AQ(adapter)) 1531 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1532 else 1533 err = iavf_init_rss(adapter); 1534 err: 1535 return err; 1536 } 1537 1538 /** 1539 * iavf_process_aq_command - process aq_required flags 1540 * and sends aq command 1541 * @adapter: pointer to iavf adapter structure 1542 * 1543 * Returns 0 on success 1544 * Returns error code if no command was sent 1545 * or error code if the command failed. 
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	return -EAGAIN;
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Handles the __IAVF_STARTUP driver state. On success the state is
 * changed to __IAVF_INIT_VERSION_CHECK; on failure -EAGAIN is returned.
 **/
static int iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	adapter->state = __IAVF_INIT_VERSION_CHECK;
err:
	return err;
}

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Handles the __IAVF_INIT_VERSION_CHECK driver state. On success the state
 * is changed to __IAVF_INIT_GET_RESOURCES; on failure -EAGAIN is returned.
 **/
static int iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	adapter->state = __IAVF_INIT_GET_RESOURCES;

err:
	return err;
}

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Handles the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes the driver initialization procedure.
1778 * When success the state is changed to __IAVF_DOWN 1779 * when fails it returns -EAGAIN 1780 **/ 1781 static int iavf_init_get_resources(struct iavf_adapter *adapter) 1782 { 1783 struct net_device *netdev = adapter->netdev; 1784 struct pci_dev *pdev = adapter->pdev; 1785 struct iavf_hw *hw = &adapter->hw; 1786 int err = 0, bufsz; 1787 1788 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 1789 /* aq msg sent, awaiting reply */ 1790 if (!adapter->vf_res) { 1791 bufsz = sizeof(struct virtchnl_vf_resource) + 1792 (IAVF_MAX_VF_VSI * 1793 sizeof(struct virtchnl_vsi_resource)); 1794 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); 1795 if (!adapter->vf_res) 1796 goto err; 1797 } 1798 err = iavf_get_vf_config(adapter); 1799 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { 1800 err = iavf_send_vf_config_msg(adapter); 1801 goto err; 1802 } else if (err == IAVF_ERR_PARAM) { 1803 /* We only get ERR_PARAM if the device is in a very bad 1804 * state or if we've been disabled for previous bad 1805 * behavior. Either way, we're done now. 1806 */ 1807 iavf_shutdown_adminq(hw); 1808 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 1809 return 0; 1810 } 1811 if (err) { 1812 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 1813 goto err_alloc; 1814 } 1815 1816 if (iavf_process_config(adapter)) 1817 goto err_alloc; 1818 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1819 1820 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 1821 1822 netdev->netdev_ops = &iavf_netdev_ops; 1823 iavf_set_ethtool_ops(netdev); 1824 netdev->watchdog_timeo = 5 * HZ; 1825 1826 /* MTU range: 68 - 9710 */ 1827 netdev->min_mtu = ETH_MIN_MTU; 1828 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 1829 1830 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1831 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 1832 adapter->hw.mac.addr); 1833 eth_hw_addr_random(netdev); 1834 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 1835 } else { 1836 adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; 1837 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); 1838 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 1839 } 1840 1841 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 1842 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 1843 err = iavf_init_interrupt_scheme(adapter); 1844 if (err) 1845 goto err_sw_init; 1846 iavf_map_rings_to_vectors(adapter); 1847 if (adapter->vf_res->vf_cap_flags & 1848 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 1849 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 1850 1851 err = iavf_request_misc_irq(adapter); 1852 if (err) 1853 goto err_sw_init; 1854 1855 netif_carrier_off(netdev); 1856 adapter->link_up = false; 1857 1858 /* set the semaphore to prevent any callbacks after device registration 1859 * up to time when state of driver will be set to __IAVF_DOWN 1860 */ 1861 rtnl_lock(); 1862 if (!adapter->netdev_registered) { 1863 err = register_netdevice(netdev); 1864 if (err) { 1865 rtnl_unlock(); 1866 goto err_register; 1867 } 1868 } 1869 1870 adapter->netdev_registered = true; 1871 1872 netif_tx_stop_all_queues(netdev); 1873 if (CLIENT_ALLOWED(adapter)) { 1874 err = iavf_lan_add_device(adapter); 1875 if (err) { 1876 rtnl_unlock(); 1877 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 1878 err); 1879 } 1880 } 1881 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 1882 if (netdev->features & NETIF_F_GRO) 1883 dev_info(&pdev->dev, "GRO is enabled\n"); 1884 1885 adapter->state = __IAVF_DOWN; 1886 set_bit(__IAVF_VSI_DOWN, 
adapter->vsi.state); 1887 rtnl_unlock(); 1888 1889 iavf_misc_irq_enable(adapter); 1890 wake_up(&adapter->down_waitqueue); 1891 1892 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 1893 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 1894 if (!adapter->rss_key || !adapter->rss_lut) 1895 goto err_mem; 1896 if (RSS_AQ(adapter)) 1897 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 1898 else 1899 iavf_init_rss(adapter); 1900 1901 return err; 1902 err_mem: 1903 iavf_free_rss(adapter); 1904 err_register: 1905 iavf_free_misc_irq(adapter); 1906 err_sw_init: 1907 iavf_reset_interrupt_capability(adapter); 1908 err_alloc: 1909 kfree(adapter->vf_res); 1910 adapter->vf_res = NULL; 1911 err: 1912 return err; 1913 } 1914 1915 /** 1916 * iavf_watchdog_task - Periodic call-back task 1917 * @work: pointer to work_struct 1918 **/ 1919 static void iavf_watchdog_task(struct work_struct *work) 1920 { 1921 struct iavf_adapter *adapter = container_of(work, 1922 struct iavf_adapter, 1923 watchdog_task.work); 1924 struct iavf_hw *hw = &adapter->hw; 1925 u32 reg_val; 1926 1927 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1928 goto restart_watchdog; 1929 1930 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 1931 adapter->state = __IAVF_COMM_FAILED; 1932 1933 switch (adapter->state) { 1934 case __IAVF_COMM_FAILED: 1935 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 1936 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 1937 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 1938 reg_val == VIRTCHNL_VFR_COMPLETED) { 1939 /* A chance for redemption! */ 1940 dev_err(&adapter->pdev->dev, 1941 "Hardware came out of reset. Attempting reinit.\n"); 1942 adapter->state = __IAVF_STARTUP; 1943 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 1944 queue_delayed_work(iavf_wq, &adapter->init_task, 10); 1945 clear_bit(__IAVF_IN_CRITICAL_TASK, 1946 &adapter->crit_section); 1947 /* Don't reschedule the watchdog, since we've restarted 1948 * the init task. When init_task contacts the PF and 1949 * gets everything set up again, it'll restart the 1950 * watchdog for us. Down, boy. Sit. Stay. Woof. 
1951 */ 1952 return; 1953 } 1954 adapter->aq_required = 0; 1955 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1956 clear_bit(__IAVF_IN_CRITICAL_TASK, 1957 &adapter->crit_section); 1958 queue_delayed_work(iavf_wq, 1959 &adapter->watchdog_task, 1960 msecs_to_jiffies(10)); 1961 goto watchdog_done; 1962 case __IAVF_RESETTING: 1963 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1964 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 1965 return; 1966 case __IAVF_DOWN: 1967 case __IAVF_DOWN_PENDING: 1968 case __IAVF_TESTING: 1969 case __IAVF_RUNNING: 1970 if (adapter->current_op) { 1971 if (!iavf_asq_done(hw)) { 1972 dev_dbg(&adapter->pdev->dev, 1973 "Admin queue timeout\n"); 1974 iavf_send_api_ver(adapter); 1975 } 1976 } else { 1977 if (!iavf_process_aq_command(adapter) && 1978 adapter->state == __IAVF_RUNNING) 1979 iavf_request_stats(adapter); 1980 } 1981 break; 1982 case __IAVF_REMOVE: 1983 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 1984 return; 1985 default: 1986 goto restart_watchdog; 1987 } 1988 1989 /* check for hw reset */ 1990 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 1991 if (!reg_val) { 1992 adapter->state = __IAVF_RESETTING; 1993 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1994 adapter->aq_required = 0; 1995 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1996 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 1997 queue_work(iavf_wq, &adapter->reset_task); 1998 goto watchdog_done; 1999 } 2000 2001 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2002 watchdog_done: 2003 if (adapter->state == __IAVF_RUNNING || 2004 adapter->state == __IAVF_COMM_FAILED) 2005 iavf_detect_recover_hung(&adapter->vsi); 2006 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2007 restart_watchdog: 2008 if (adapter->aq_required) 2009 queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2010 msecs_to_jiffies(20)); 2011 else 2012 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2013 queue_work(iavf_wq, &adapter->adminq_task); 2014 } 2015 2016 static void iavf_disable_vf(struct iavf_adapter *adapter) 2017 { 2018 struct iavf_mac_filter *f, *ftmp; 2019 struct iavf_vlan_filter *fv, *fvtmp; 2020 struct iavf_cloud_filter *cf, *cftmp; 2021 2022 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2023 2024 /* We don't use netif_running() because it may be true prior to 2025 * ndo_open() returning, so we can't assume it means all our open 2026 * tasks have finished, since we're not holding the rtnl_lock here. 
2027 */ 2028 if (adapter->state == __IAVF_RUNNING) { 2029 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2030 netif_carrier_off(adapter->netdev); 2031 netif_tx_disable(adapter->netdev); 2032 adapter->link_up = false; 2033 iavf_napi_disable_all(adapter); 2034 iavf_irq_disable(adapter); 2035 iavf_free_traffic_irqs(adapter); 2036 iavf_free_all_tx_resources(adapter); 2037 iavf_free_all_rx_resources(adapter); 2038 } 2039 2040 spin_lock_bh(&adapter->mac_vlan_list_lock); 2041 2042 /* Delete all of the filters */ 2043 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2044 list_del(&f->list); 2045 kfree(f); 2046 } 2047 2048 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2049 list_del(&fv->list); 2050 kfree(fv); 2051 } 2052 2053 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2054 2055 spin_lock_bh(&adapter->cloud_filter_list_lock); 2056 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2057 list_del(&cf->list); 2058 kfree(cf); 2059 adapter->num_cloud_filters--; 2060 } 2061 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2062 2063 iavf_free_misc_irq(adapter); 2064 iavf_reset_interrupt_capability(adapter); 2065 iavf_free_queues(adapter); 2066 iavf_free_q_vectors(adapter); 2067 kfree(adapter->vf_res); 2068 iavf_shutdown_adminq(&adapter->hw); 2069 adapter->netdev->flags &= ~IFF_UP; 2070 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2071 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2072 adapter->state = __IAVF_DOWN; 2073 wake_up(&adapter->down_waitqueue); 2074 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2075 } 2076 2077 #define IAVF_RESET_WAIT_MS 10 2078 #define IAVF_RESET_WAIT_COUNT 500 2079 /** 2080 * iavf_reset_task - Call-back task to handle hardware reset 2081 * @work: pointer to work_struct 2082 * 2083 * During reset we need to shut down and reinitialize the admin queue 2084 * before we can use it to communicate with the PF again. We also clear 2085 * and reinit the rings because that context is lost as well. 2086 **/ 2087 static void iavf_reset_task(struct work_struct *work) 2088 { 2089 struct iavf_adapter *adapter = container_of(work, 2090 struct iavf_adapter, 2091 reset_task); 2092 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2093 struct net_device *netdev = adapter->netdev; 2094 struct iavf_hw *hw = &adapter->hw; 2095 struct iavf_vlan_filter *vlf; 2096 struct iavf_cloud_filter *cf; 2097 struct iavf_mac_filter *f; 2098 u32 reg_val; 2099 int i = 0, err; 2100 bool running; 2101 2102 /* When device is being removed it doesn't make sense to run the reset 2103 * task, just return in such a case. 2104 */ 2105 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 2106 return; 2107 2108 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, 2109 &adapter->crit_section)) 2110 usleep_range(500, 1000); 2111 if (CLIENT_ENABLED(adapter)) { 2112 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2113 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2114 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2115 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2116 cancel_delayed_work_sync(&adapter->client_task); 2117 iavf_notify_client_close(&adapter->vsi, true); 2118 } 2119 iavf_misc_irq_disable(adapter); 2120 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2121 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2122 /* Restart the AQ here. If we have been reset but didn't 2123 * detect it, or if the PF had to reinit, our AQ will be hosed. 
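	 * Shut the AQ down and bring it back up, then explicitly ask the
	 * PF for a fresh VF reset (see the three calls just below).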
2124 */ 2125 iavf_shutdown_adminq(hw); 2126 iavf_init_adminq(hw); 2127 iavf_request_reset(adapter); 2128 } 2129 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2130 2131 /* poll until we see the reset actually happen */ 2132 for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { 2133 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2134 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2135 if (!reg_val) 2136 break; 2137 usleep_range(5000, 10000); 2138 } 2139 if (i == IAVF_RESET_WAIT_COUNT) { 2140 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2141 goto continue_reset; /* act like the reset happened */ 2142 } 2143 2144 /* wait until the reset is complete and the PF is responding to us */ 2145 for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) { 2146 /* sleep first to make sure a minimum wait time is met */ 2147 msleep(IAVF_RESET_WAIT_MS); 2148 2149 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2150 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2151 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 2152 break; 2153 } 2154 2155 pci_set_master(adapter->pdev); 2156 2157 if (i == IAVF_RESET_WAIT_COUNT) { 2158 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 2159 reg_val); 2160 iavf_disable_vf(adapter); 2161 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2162 return; /* Do not attempt to reinit. It's dead, Jim. */ 2163 } 2164 2165 continue_reset: 2166 /* We don't use netif_running() because it may be true prior to 2167 * ndo_open() returning, so we can't assume it means all our open 2168 * tasks have finished, since we're not holding the rtnl_lock here. 2169 */ 2170 running = ((adapter->state == __IAVF_RUNNING) || 2171 (adapter->state == __IAVF_RESETTING)); 2172 2173 if (running) { 2174 netif_carrier_off(netdev); 2175 netif_tx_stop_all_queues(netdev); 2176 adapter->link_up = false; 2177 iavf_napi_disable_all(adapter); 2178 } 2179 iavf_irq_disable(adapter); 2180 2181 adapter->state = __IAVF_RESETTING; 2182 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2183 2184 /* free the Tx/Rx rings and descriptors, might be better to just 2185 * re-use them sometime in the future 2186 */ 2187 iavf_free_all_rx_resources(adapter); 2188 iavf_free_all_tx_resources(adapter); 2189 2190 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED; 2191 /* kill and reinit the admin queue */ 2192 iavf_shutdown_adminq(hw); 2193 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2194 err = iavf_init_adminq(hw); 2195 if (err) 2196 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", 2197 err); 2198 adapter->aq_required = 0; 2199 2200 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2201 err = iavf_reinit_interrupt_scheme(adapter); 2202 if (err) 2203 goto reset_err; 2204 } 2205 2206 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; 2207 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; 2208 2209 spin_lock_bh(&adapter->mac_vlan_list_lock); 2210 2211 /* re-add all MAC filters */ 2212 list_for_each_entry(f, &adapter->mac_filter_list, list) { 2213 f->add = true; 2214 } 2215 /* re-add all VLAN filters */ 2216 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 2217 vlf->add = true; 2218 } 2219 2220 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2221 2222 /* check if TCs are running and re-add all cloud filters */ 2223 spin_lock_bh(&adapter->cloud_filter_list_lock); 2224 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 2225 adapter->num_tc) { 2226 list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 2227 cf->add = true; 2228 } 2229 } 2230 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2231 2232 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; 2233 adapter->aq_required |= 
IAVF_FLAG_AQ_ADD_VLAN_FILTER; 2234 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 2235 iavf_misc_irq_enable(adapter); 2236 2237 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 2238 2239 /* We were running when the reset started, so we need to restore some 2240 * state here. 2241 */ 2242 if (running) { 2243 /* allocate transmit descriptors */ 2244 err = iavf_setup_all_tx_resources(adapter); 2245 if (err) 2246 goto reset_err; 2247 2248 /* allocate receive descriptors */ 2249 err = iavf_setup_all_rx_resources(adapter); 2250 if (err) 2251 goto reset_err; 2252 2253 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) { 2254 err = iavf_request_traffic_irqs(adapter, netdev->name); 2255 if (err) 2256 goto reset_err; 2257 2258 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 2259 } 2260 2261 iavf_configure(adapter); 2262 2263 iavf_up_complete(adapter); 2264 2265 iavf_irq_enable(adapter, true); 2266 } else { 2267 adapter->state = __IAVF_DOWN; 2268 wake_up(&adapter->down_waitqueue); 2269 } 2270 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2271 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2272 2273 return; 2274 reset_err: 2275 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2276 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 2277 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 2278 iavf_close(netdev); 2279 } 2280 2281 /** 2282 * iavf_adminq_task - worker thread to clean the admin queue 2283 * @work: pointer to work_struct containing our data 2284 **/ 2285 static void iavf_adminq_task(struct work_struct *work) 2286 { 2287 struct iavf_adapter *adapter = 2288 container_of(work, struct iavf_adapter, adminq_task); 2289 struct iavf_hw *hw = &adapter->hw; 2290 struct iavf_arq_event_info event; 2291 enum virtchnl_ops v_op; 2292 enum iavf_status ret, v_ret; 2293 u32 val, oldval; 2294 u16 pending; 2295 2296 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2297 goto out; 2298 2299 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 2300 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 2301 if (!event.msg_buf) 2302 goto out; 2303 2304 do { 2305 ret = iavf_clean_arq_element(hw, &event, &pending); 2306 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 2307 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 2308 2309 if (ret || !v_op) 2310 break; /* No event to process or error cleaning ARQ */ 2311 2312 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 2313 event.msg_len); 2314 if (pending != 0) 2315 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 2316 } while (pending); 2317 2318 if ((adapter->flags & 2319 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 2320 adapter->state == __IAVF_RESETTING) 2321 goto freedom; 2322 2323 /* check for error indications */ 2324 val = rd32(hw, hw->aq.arq.len); 2325 if (val == 0xdeadbeef) /* indicates device in reset */ 2326 goto freedom; 2327 oldval = val; 2328 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 2329 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 2330 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 2331 } 2332 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 2333 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 2334 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 2335 } 2336 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 2337 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 2338 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 2339 } 2340 if (oldval != val) 2341 wr32(hw, hw->aq.arq.len, val); 2342 2343 val = rd32(hw, hw->aq.asq.len); 2344 oldval = val; 2345 if 
(val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 2346 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 2347 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 2348 } 2349 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 2350 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 2351 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 2352 } 2353 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 2354 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 2355 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 2356 } 2357 if (oldval != val) 2358 wr32(hw, hw->aq.asq.len, val); 2359 2360 freedom: 2361 kfree(event.msg_buf); 2362 out: 2363 /* re-enable Admin queue interrupt cause */ 2364 iavf_misc_irq_enable(adapter); 2365 } 2366 2367 /** 2368 * iavf_client_task - worker thread to perform client work 2369 * @work: pointer to work_struct containing our data 2370 * 2371 * This task handles client interactions. Because client calls can be 2372 * reentrant, we can't handle them in the watchdog. 2373 **/ 2374 static void iavf_client_task(struct work_struct *work) 2375 { 2376 struct iavf_adapter *adapter = 2377 container_of(work, struct iavf_adapter, client_task.work); 2378 2379 /* If we can't get the client bit, just give up. We'll be rescheduled 2380 * later. 2381 */ 2382 2383 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) 2384 return; 2385 2386 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 2387 iavf_client_subtask(adapter); 2388 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 2389 goto out; 2390 } 2391 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 2392 iavf_notify_client_l2_params(&adapter->vsi); 2393 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 2394 goto out; 2395 } 2396 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 2397 iavf_notify_client_close(&adapter->vsi, false); 2398 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 2399 goto out; 2400 } 2401 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 2402 iavf_notify_client_open(&adapter->vsi); 2403 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 2404 } 2405 out: 2406 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); 2407 } 2408 2409 /** 2410 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 2411 * @adapter: board private structure 2412 * 2413 * Free all transmit software resources 2414 **/ 2415 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 2416 { 2417 int i; 2418 2419 if (!adapter->tx_rings) 2420 return; 2421 2422 for (i = 0; i < adapter->num_active_queues; i++) 2423 if (adapter->tx_rings[i].desc) 2424 iavf_free_tx_resources(&adapter->tx_rings[i]); 2425 } 2426 2427 /** 2428 * iavf_setup_all_tx_resources - allocate all queues Tx resources 2429 * @adapter: board private structure 2430 * 2431 * If this function returns with an error, then it's possible one or 2432 * more of the rings is populated (while the rest are not). It is the 2433 * callers duty to clean those orphaned rings. 
2434 * 2435 * Return 0 on success, negative on failure 2436 **/ 2437 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 2438 { 2439 int i, err = 0; 2440 2441 for (i = 0; i < adapter->num_active_queues; i++) { 2442 adapter->tx_rings[i].count = adapter->tx_desc_count; 2443 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 2444 if (!err) 2445 continue; 2446 dev_err(&adapter->pdev->dev, 2447 "Allocation for Tx Queue %u failed\n", i); 2448 break; 2449 } 2450 2451 return err; 2452 } 2453 2454 /** 2455 * iavf_setup_all_rx_resources - allocate all queues Rx resources 2456 * @adapter: board private structure 2457 * 2458 * If this function returns with an error, then it's possible one or 2459 * more of the rings is populated (while the rest are not). It is the 2460 * callers duty to clean those orphaned rings. 2461 * 2462 * Return 0 on success, negative on failure 2463 **/ 2464 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 2465 { 2466 int i, err = 0; 2467 2468 for (i = 0; i < adapter->num_active_queues; i++) { 2469 adapter->rx_rings[i].count = adapter->rx_desc_count; 2470 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 2471 if (!err) 2472 continue; 2473 dev_err(&adapter->pdev->dev, 2474 "Allocation for Rx Queue %u failed\n", i); 2475 break; 2476 } 2477 return err; 2478 } 2479 2480 /** 2481 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 2482 * @adapter: board private structure 2483 * 2484 * Free all receive software resources 2485 **/ 2486 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 2487 { 2488 int i; 2489 2490 if (!adapter->rx_rings) 2491 return; 2492 2493 for (i = 0; i < adapter->num_active_queues; i++) 2494 if (adapter->rx_rings[i].desc) 2495 iavf_free_rx_resources(&adapter->rx_rings[i]); 2496 } 2497 2498 /** 2499 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 2500 * @adapter: board private structure 2501 * @max_tx_rate: max Tx bw for a tc 2502 **/ 2503 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 2504 u64 max_tx_rate) 2505 { 2506 int speed = 0, ret = 0; 2507 2508 switch (adapter->link_speed) { 2509 case IAVF_LINK_SPEED_40GB: 2510 speed = 40000; 2511 break; 2512 case IAVF_LINK_SPEED_25GB: 2513 speed = 25000; 2514 break; 2515 case IAVF_LINK_SPEED_20GB: 2516 speed = 20000; 2517 break; 2518 case IAVF_LINK_SPEED_10GB: 2519 speed = 10000; 2520 break; 2521 case IAVF_LINK_SPEED_1GB: 2522 speed = 1000; 2523 break; 2524 case IAVF_LINK_SPEED_100MB: 2525 speed = 100; 2526 break; 2527 default: 2528 break; 2529 } 2530 2531 if (max_tx_rate > speed) { 2532 dev_err(&adapter->pdev->dev, 2533 "Invalid tx rate specified\n"); 2534 ret = -EINVAL; 2535 } 2536 2537 return ret; 2538 } 2539 2540 /** 2541 * iavf_validate_channel_config - validate queue mapping info 2542 * @adapter: board private structure 2543 * @mqprio_qopt: queue parameters 2544 * 2545 * This function validates if the config provided by the user to 2546 * configure queue channels is valid or not. Returns 0 on a valid 2547 * config. 
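 *
 * As an illustration (values are hypothetical, not taken from a real
 * setup): num_tc = 2 with count = {4, 4} and offset = {0, 4} is accepted,
 * because each TC's queue range starts exactly where the previous one
 * ended; the same counts with offset = {0, 6} would leave a gap and be
 * rejected, as would any nonzero per-TC min_rate, since only max_rate is
 * supported here.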
 **/
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}

/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
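 *
 * A channel configuration is typically requested from user space with
 * something like (interface name is hypothetical):
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 * and removed again with "tc qdisc del dev <vf-netdev> root".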
2611 **/ 2612 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 2613 { 2614 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2615 struct iavf_adapter *adapter = netdev_priv(netdev); 2616 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2617 u8 num_tc = 0, total_qps = 0; 2618 int ret = 0, netdev_tc = 0; 2619 u64 max_tx_rate; 2620 u16 mode; 2621 int i; 2622 2623 num_tc = mqprio_qopt->qopt.num_tc; 2624 mode = mqprio_qopt->mode; 2625 2626 /* delete queue_channel */ 2627 if (!mqprio_qopt->qopt.hw) { 2628 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 2629 /* reset the tc configuration */ 2630 netdev_reset_tc(netdev); 2631 adapter->num_tc = 0; 2632 netif_tx_stop_all_queues(netdev); 2633 netif_tx_disable(netdev); 2634 iavf_del_all_cloud_filters(adapter); 2635 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 2636 goto exit; 2637 } else { 2638 return -EINVAL; 2639 } 2640 } 2641 2642 /* add queue channel */ 2643 if (mode == TC_MQPRIO_MODE_CHANNEL) { 2644 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2645 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2646 return -EOPNOTSUPP; 2647 } 2648 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 2649 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2650 return -EINVAL; 2651 } 2652 2653 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 2654 if (ret) 2655 return ret; 2656 /* Return if same TC config is requested */ 2657 if (adapter->num_tc == num_tc) 2658 return 0; 2659 adapter->num_tc = num_tc; 2660 2661 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2662 if (i < num_tc) { 2663 adapter->ch_config.ch_info[i].count = 2664 mqprio_qopt->qopt.count[i]; 2665 adapter->ch_config.ch_info[i].offset = 2666 mqprio_qopt->qopt.offset[i]; 2667 total_qps += mqprio_qopt->qopt.count[i]; 2668 max_tx_rate = mqprio_qopt->max_rate[i]; 2669 /* convert to Mbps */ 2670 max_tx_rate = div_u64(max_tx_rate, 2671 IAVF_MBPS_DIVISOR); 2672 adapter->ch_config.ch_info[i].max_tx_rate = 2673 max_tx_rate; 2674 } else { 2675 adapter->ch_config.ch_info[i].count = 1; 2676 adapter->ch_config.ch_info[i].offset = 0; 2677 } 2678 } 2679 adapter->ch_config.total_qps = total_qps; 2680 netif_tx_stop_all_queues(netdev); 2681 netif_tx_disable(netdev); 2682 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 2683 netdev_reset_tc(netdev); 2684 /* Report the tc mapping up the stack */ 2685 netdev_set_num_tc(adapter->netdev, num_tc); 2686 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 2687 u16 qcount = mqprio_qopt->qopt.count[i]; 2688 u16 qoffset = mqprio_qopt->qopt.offset[i]; 2689 2690 if (i < num_tc) 2691 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2692 qoffset); 2693 } 2694 } 2695 exit: 2696 return ret; 2697 } 2698 2699 /** 2700 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 2701 * @adapter: board private structure 2702 * @cls_flower: pointer to struct flow_cls_offload 2703 * @filter: pointer to cloud filter structure 2704 */ 2705 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 2706 struct flow_cls_offload *f, 2707 struct iavf_cloud_filter *filter) 2708 { 2709 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 2710 struct flow_dissector *dissector = rule->match.dissector; 2711 u16 n_proto_mask = 0; 2712 u16 n_proto_key = 0; 2713 u8 field_flags = 0; 2714 u16 addr_type = 0; 2715 u16 n_proto = 0; 2716 int i = 0; 2717 struct virtchnl_filter *vf = &filter->f; 2718 2719 if (dissector->used_keys & 2720 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2721 BIT(FLOW_DISSECTOR_KEY_BASIC) | 2722 
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2723 BIT(FLOW_DISSECTOR_KEY_VLAN) | 2724 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2725 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2726 BIT(FLOW_DISSECTOR_KEY_PORTS) | 2727 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2728 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2729 dissector->used_keys); 2730 return -EOPNOTSUPP; 2731 } 2732 2733 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2734 struct flow_match_enc_keyid match; 2735 2736 flow_rule_match_enc_keyid(rule, &match); 2737 if (match.mask->keyid != 0) 2738 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 2739 } 2740 2741 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 2742 struct flow_match_basic match; 2743 2744 flow_rule_match_basic(rule, &match); 2745 n_proto_key = ntohs(match.key->n_proto); 2746 n_proto_mask = ntohs(match.mask->n_proto); 2747 2748 if (n_proto_key == ETH_P_ALL) { 2749 n_proto_key = 0; 2750 n_proto_mask = 0; 2751 } 2752 n_proto = n_proto_key & n_proto_mask; 2753 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2754 return -EINVAL; 2755 if (n_proto == ETH_P_IPV6) { 2756 /* specify flow type as TCP IPv6 */ 2757 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 2758 } 2759 2760 if (match.key->ip_proto != IPPROTO_TCP) { 2761 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2762 return -EINVAL; 2763 } 2764 } 2765 2766 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2767 struct flow_match_eth_addrs match; 2768 2769 flow_rule_match_eth_addrs(rule, &match); 2770 2771 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2772 if (!is_zero_ether_addr(match.mask->dst)) { 2773 if (is_broadcast_ether_addr(match.mask->dst)) { 2774 field_flags |= IAVF_CLOUD_FIELD_OMAC; 2775 } else { 2776 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2777 match.mask->dst); 2778 return IAVF_ERR_CONFIG; 2779 } 2780 } 2781 2782 if (!is_zero_ether_addr(match.mask->src)) { 2783 if (is_broadcast_ether_addr(match.mask->src)) { 2784 field_flags |= IAVF_CLOUD_FIELD_IMAC; 2785 } else { 2786 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2787 match.mask->src); 2788 return IAVF_ERR_CONFIG; 2789 } 2790 } 2791 2792 if (!is_zero_ether_addr(match.key->dst)) 2793 if (is_valid_ether_addr(match.key->dst) || 2794 is_multicast_ether_addr(match.key->dst)) { 2795 /* set the mask if a valid dst_mac address */ 2796 for (i = 0; i < ETH_ALEN; i++) 2797 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 2798 ether_addr_copy(vf->data.tcp_spec.dst_mac, 2799 match.key->dst); 2800 } 2801 2802 if (!is_zero_ether_addr(match.key->src)) 2803 if (is_valid_ether_addr(match.key->src) || 2804 is_multicast_ether_addr(match.key->src)) { 2805 /* set the mask if a valid dst_mac address */ 2806 for (i = 0; i < ETH_ALEN; i++) 2807 vf->mask.tcp_spec.src_mac[i] |= 0xff; 2808 ether_addr_copy(vf->data.tcp_spec.src_mac, 2809 match.key->src); 2810 } 2811 } 2812 2813 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 2814 struct flow_match_vlan match; 2815 2816 flow_rule_match_vlan(rule, &match); 2817 if (match.mask->vlan_id) { 2818 if (match.mask->vlan_id == VLAN_VID_MASK) { 2819 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 2820 } else { 2821 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2822 match.mask->vlan_id); 2823 return IAVF_ERR_CONFIG; 2824 } 2825 } 2826 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2827 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 2828 } 2829 2830 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 2831 struct flow_match_control match; 2832 2833 
flow_rule_match_control(rule, &match); 2834 addr_type = match.key->addr_type; 2835 } 2836 2837 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2838 struct flow_match_ipv4_addrs match; 2839 2840 flow_rule_match_ipv4_addrs(rule, &match); 2841 if (match.mask->dst) { 2842 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 2843 field_flags |= IAVF_CLOUD_FIELD_IIP; 2844 } else { 2845 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2846 be32_to_cpu(match.mask->dst)); 2847 return IAVF_ERR_CONFIG; 2848 } 2849 } 2850 2851 if (match.mask->src) { 2852 if (match.mask->src == cpu_to_be32(0xffffffff)) { 2853 field_flags |= IAVF_CLOUD_FIELD_IIP; 2854 } else { 2855 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2856 be32_to_cpu(match.mask->dst)); 2857 return IAVF_ERR_CONFIG; 2858 } 2859 } 2860 2861 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 2862 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2863 return IAVF_ERR_CONFIG; 2864 } 2865 if (match.key->dst) { 2866 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 2867 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 2868 } 2869 if (match.key->src) { 2870 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 2871 vf->data.tcp_spec.src_ip[0] = match.key->src; 2872 } 2873 } 2874 2875 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2876 struct flow_match_ipv6_addrs match; 2877 2878 flow_rule_match_ipv6_addrs(rule, &match); 2879 2880 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 2881 if (ipv6_addr_any(&match.mask->dst)) { 2882 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 2883 IPV6_ADDR_ANY); 2884 return IAVF_ERR_CONFIG; 2885 } 2886 2887 /* src and dest IPv6 address should not be LOOPBACK 2888 * (0:0:0:0:0:0:0:1) which can be represented as ::1 2889 */ 2890 if (ipv6_addr_loopback(&match.key->dst) || 2891 ipv6_addr_loopback(&match.key->src)) { 2892 dev_err(&adapter->pdev->dev, 2893 "ipv6 addr should not be loopback\n"); 2894 return IAVF_ERR_CONFIG; 2895 } 2896 if (!ipv6_addr_any(&match.mask->dst) || 2897 !ipv6_addr_any(&match.mask->src)) 2898 field_flags |= IAVF_CLOUD_FIELD_IIP; 2899 2900 for (i = 0; i < 4; i++) 2901 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 2902 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 2903 sizeof(vf->data.tcp_spec.dst_ip)); 2904 for (i = 0; i < 4; i++) 2905 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 2906 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 2907 sizeof(vf->data.tcp_spec.src_ip)); 2908 } 2909 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 2910 struct flow_match_ports match; 2911 2912 flow_rule_match_ports(rule, &match); 2913 if (match.mask->src) { 2914 if (match.mask->src == cpu_to_be16(0xffff)) { 2915 field_flags |= IAVF_CLOUD_FIELD_IIP; 2916 } else { 2917 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 2918 be16_to_cpu(match.mask->src)); 2919 return IAVF_ERR_CONFIG; 2920 } 2921 } 2922 2923 if (match.mask->dst) { 2924 if (match.mask->dst == cpu_to_be16(0xffff)) { 2925 field_flags |= IAVF_CLOUD_FIELD_IIP; 2926 } else { 2927 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 2928 be16_to_cpu(match.mask->dst)); 2929 return IAVF_ERR_CONFIG; 2930 } 2931 } 2932 if (match.key->dst) { 2933 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 2934 vf->data.tcp_spec.dst_port = match.key->dst; 2935 } 2936 2937 if (match.key->src) { 2938 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 2939 vf->data.tcp_spec.src_port = match.key->src; 2940 } 2941 } 2942 vf->field_flags = field_flags; 2943 2944 
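	/* At this point flow_type, the address/port keys and their masks
	 * have been filled in; field_flags records which fields the PF
	 * should match on.
	 */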
return 0; 2945 } 2946 2947 /** 2948 * iavf_handle_tclass - Forward to a traffic class on the device 2949 * @adapter: board private structure 2950 * @tc: traffic class index on the device 2951 * @filter: pointer to cloud filter structure 2952 */ 2953 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 2954 struct iavf_cloud_filter *filter) 2955 { 2956 if (tc == 0) 2957 return 0; 2958 if (tc < adapter->num_tc) { 2959 if (!filter->f.data.tcp_spec.dst_port) { 2960 dev_err(&adapter->pdev->dev, 2961 "Specify destination port to redirect to traffic class other than TC0\n"); 2962 return -EINVAL; 2963 } 2964 } 2965 /* redirect to a traffic class on the same device */ 2966 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2967 filter->f.action_meta = tc; 2968 return 0; 2969 } 2970 2971 /** 2972 * iavf_configure_clsflower - Add tc flower filters 2973 * @adapter: board private structure 2974 * @cls_flower: Pointer to struct flow_cls_offload 2975 */ 2976 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 2977 struct flow_cls_offload *cls_flower) 2978 { 2979 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 2980 struct iavf_cloud_filter *filter = NULL; 2981 int err = -EINVAL, count = 50; 2982 2983 if (tc < 0) { 2984 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 2985 return -EINVAL; 2986 } 2987 2988 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 2989 if (!filter) 2990 return -ENOMEM; 2991 2992 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 2993 &adapter->crit_section)) { 2994 if (--count == 0) 2995 goto err; 2996 udelay(1); 2997 } 2998 2999 filter->cookie = cls_flower->cookie; 3000 3001 /* set the mask to all zeroes to begin with */ 3002 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 3003 /* start out with flow type and eth type IPv4 to begin with */ 3004 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 3005 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 3006 if (err < 0) 3007 goto err; 3008 3009 err = iavf_handle_tclass(adapter, tc, filter); 3010 if (err < 0) 3011 goto err; 3012 3013 /* add filter to the list */ 3014 spin_lock_bh(&adapter->cloud_filter_list_lock); 3015 list_add_tail(&filter->list, &adapter->cloud_filter_list); 3016 adapter->num_cloud_filters++; 3017 filter->add = true; 3018 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 3019 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3020 err: 3021 if (err) 3022 kfree(filter); 3023 3024 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3025 return err; 3026 } 3027 3028 /* iavf_find_cf - Find the cloud filter in the list 3029 * @adapter: Board private structure 3030 * @cookie: filter specific cookie 3031 * 3032 * Returns ptr to the filter object or NULL. Must be called while holding the 3033 * cloud_filter_list_lock. 
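 *
 * Typical usage, mirroring iavf_delete_clsflower() below:
 *	spin_lock_bh(&adapter->cloud_filter_list_lock);
 *	filter = iavf_find_cf(adapter, &cls_flower->cookie);
 *	...
 *	spin_unlock_bh(&adapter->cloud_filter_list_lock);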
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}

/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}

/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case FLOW_CLS_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: board private structure
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(iavf_block_cb_list);

/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &iavf_block_cb_list,
						  iavf_setup_tc_block_cb,
						  adapter, adapter, true);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
At this point all resources needed 3155 * for transmit and receive operations are allocated, the interrupt 3156 * handler is registered with the OS, the watchdog is started, 3157 * and the stack is notified that the interface is ready. 3158 **/ 3159 static int iavf_open(struct net_device *netdev) 3160 { 3161 struct iavf_adapter *adapter = netdev_priv(netdev); 3162 int err; 3163 3164 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 3165 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 3166 return -EIO; 3167 } 3168 3169 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3170 &adapter->crit_section)) 3171 usleep_range(500, 1000); 3172 3173 if (adapter->state != __IAVF_DOWN) { 3174 err = -EBUSY; 3175 goto err_unlock; 3176 } 3177 3178 /* allocate transmit descriptors */ 3179 err = iavf_setup_all_tx_resources(adapter); 3180 if (err) 3181 goto err_setup_tx; 3182 3183 /* allocate receive descriptors */ 3184 err = iavf_setup_all_rx_resources(adapter); 3185 if (err) 3186 goto err_setup_rx; 3187 3188 /* clear any pending interrupts, may auto mask */ 3189 err = iavf_request_traffic_irqs(adapter, netdev->name); 3190 if (err) 3191 goto err_req_irq; 3192 3193 spin_lock_bh(&adapter->mac_vlan_list_lock); 3194 3195 iavf_add_filter(adapter, adapter->hw.mac.addr); 3196 3197 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3198 3199 iavf_configure(adapter); 3200 3201 iavf_up_complete(adapter); 3202 3203 iavf_irq_enable(adapter, true); 3204 3205 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3206 3207 return 0; 3208 3209 err_req_irq: 3210 iavf_down(adapter); 3211 iavf_free_traffic_irqs(adapter); 3212 err_setup_rx: 3213 iavf_free_all_rx_resources(adapter); 3214 err_setup_tx: 3215 iavf_free_all_tx_resources(adapter); 3216 err_unlock: 3217 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3218 3219 return err; 3220 } 3221 3222 /** 3223 * iavf_close - Disables a network interface 3224 * @netdev: network interface device structure 3225 * 3226 * Returns 0, this is not allowed to fail 3227 * 3228 * The close entry point is called when an interface is de-activated 3229 * by the OS. The hardware is still under the drivers control, but 3230 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) 3231 * are freed, along with all transmit and receive resources. 3232 **/ 3233 static int iavf_close(struct net_device *netdev) 3234 { 3235 struct iavf_adapter *adapter = netdev_priv(netdev); 3236 int status; 3237 3238 if (adapter->state <= __IAVF_DOWN_PENDING) 3239 return 0; 3240 3241 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3242 &adapter->crit_section)) 3243 usleep_range(500, 1000); 3244 3245 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3246 if (CLIENT_ENABLED(adapter)) 3247 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3248 3249 iavf_down(adapter); 3250 adapter->state = __IAVF_DOWN_PENDING; 3251 iavf_free_traffic_irqs(adapter); 3252 3253 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3254 3255 /* We explicitly don't free resources here because the hardware is 3256 * still active and can DMA into memory. Resources are cleared in 3257 * iavf_virtchnl_completion() after we get confirmation from the PF 3258 * driver that the rings have been stopped. 3259 * 3260 * Also, we wait for state to transition to __IAVF_DOWN before 3261 * returning. State change occurs in iavf_virtchnl_completion() after 3262 * VF resources are released (which occurs after PF driver processes and 3263 * responds to admin queue commands). 
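 *
 * We wait at most 500 ms for that transition (see the
 * wait_event_timeout() call below); if it does not happen in time we
 * only log a warning and still return 0.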
3264 */ 3265 3266 status = wait_event_timeout(adapter->down_waitqueue, 3267 adapter->state == __IAVF_DOWN, 3268 msecs_to_jiffies(500)); 3269 if (!status) 3270 netdev_warn(netdev, "Device resources not yet released\n"); 3271 return 0; 3272 } 3273 3274 /** 3275 * iavf_change_mtu - Change the Maximum Transfer Unit 3276 * @netdev: network interface device structure 3277 * @new_mtu: new value for maximum frame size 3278 * 3279 * Returns 0 on success, negative on failure 3280 **/ 3281 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 3282 { 3283 struct iavf_adapter *adapter = netdev_priv(netdev); 3284 3285 netdev->mtu = new_mtu; 3286 if (CLIENT_ENABLED(adapter)) { 3287 iavf_notify_client_l2_params(&adapter->vsi); 3288 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3289 } 3290 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 3291 queue_work(iavf_wq, &adapter->reset_task); 3292 3293 return 0; 3294 } 3295 3296 /** 3297 * iavf_set_features - set the netdev feature flags 3298 * @netdev: ptr to the netdev being adjusted 3299 * @features: the feature set that the stack is suggesting 3300 * Note: expects to be called while under rtnl_lock() 3301 **/ 3302 static int iavf_set_features(struct net_device *netdev, 3303 netdev_features_t features) 3304 { 3305 struct iavf_adapter *adapter = netdev_priv(netdev); 3306 3307 /* Don't allow changing VLAN_RX flag when adapter is not capable 3308 * of VLAN offload 3309 */ 3310 if (!VLAN_ALLOWED(adapter)) { 3311 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) 3312 return -EINVAL; 3313 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { 3314 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3315 adapter->aq_required |= 3316 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 3317 else 3318 adapter->aq_required |= 3319 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 3320 } 3321 3322 return 0; 3323 } 3324 3325 /** 3326 * iavf_features_check - Validate encapsulated packet conforms to limits 3327 * @skb: skb buff 3328 * @dev: This physical port's netdev 3329 * @features: Offload features that the stack believes apply 3330 **/ 3331 static netdev_features_t iavf_features_check(struct sk_buff *skb, 3332 struct net_device *dev, 3333 netdev_features_t features) 3334 { 3335 size_t len; 3336 3337 /* No point in doing any of this if neither checksum nor GSO are 3338 * being requested for this frame. We can rule out both by just 3339 * checking for CHECKSUM_PARTIAL 3340 */ 3341 if (skb->ip_summed != CHECKSUM_PARTIAL) 3342 return features; 3343 3344 /* We cannot support GSO if the MSS is going to be less than 3345 * 64 bytes. If it is then we need to drop support for GSO. 
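	 * For example, a GSO skb with gso_size of 48 has every
	 * NETIF_F_GSO_* bit cleared here and is left to the stack to
	 * segment in software.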
3346 */ 3347 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3348 features &= ~NETIF_F_GSO_MASK; 3349 3350 /* MACLEN can support at most 63 words */ 3351 len = skb_network_header(skb) - skb->data; 3352 if (len & ~(63 * 2)) 3353 goto out_err; 3354 3355 /* IPLEN and EIPLEN can support at most 127 dwords */ 3356 len = skb_transport_header(skb) - skb_network_header(skb); 3357 if (len & ~(127 * 4)) 3358 goto out_err; 3359 3360 if (skb->encapsulation) { 3361 /* L4TUNLEN can support 127 words */ 3362 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3363 if (len & ~(127 * 2)) 3364 goto out_err; 3365 3366 /* IPLEN can support at most 127 dwords */ 3367 len = skb_inner_transport_header(skb) - 3368 skb_inner_network_header(skb); 3369 if (len & ~(127 * 4)) 3370 goto out_err; 3371 } 3372 3373 /* No need to validate L4LEN as TCP is the only protocol with a 3374 * a flexible value and we support all possible values supported 3375 * by TCP, which is at most 15 dwords 3376 */ 3377 3378 return features; 3379 out_err: 3380 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3381 } 3382 3383 /** 3384 * iavf_fix_features - fix up the netdev feature bits 3385 * @netdev: our net device 3386 * @features: desired feature bits 3387 * 3388 * Returns fixed-up features bits 3389 **/ 3390 static netdev_features_t iavf_fix_features(struct net_device *netdev, 3391 netdev_features_t features) 3392 { 3393 struct iavf_adapter *adapter = netdev_priv(netdev); 3394 3395 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) 3396 features &= ~(NETIF_F_HW_VLAN_CTAG_TX | 3397 NETIF_F_HW_VLAN_CTAG_RX | 3398 NETIF_F_HW_VLAN_CTAG_FILTER); 3399 3400 return features; 3401 } 3402 3403 static const struct net_device_ops iavf_netdev_ops = { 3404 .ndo_open = iavf_open, 3405 .ndo_stop = iavf_close, 3406 .ndo_start_xmit = iavf_xmit_frame, 3407 .ndo_set_rx_mode = iavf_set_rx_mode, 3408 .ndo_validate_addr = eth_validate_addr, 3409 .ndo_set_mac_address = iavf_set_mac, 3410 .ndo_change_mtu = iavf_change_mtu, 3411 .ndo_tx_timeout = iavf_tx_timeout, 3412 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 3413 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 3414 .ndo_features_check = iavf_features_check, 3415 .ndo_fix_features = iavf_fix_features, 3416 .ndo_set_features = iavf_set_features, 3417 .ndo_setup_tc = iavf_setup_tc, 3418 }; 3419 3420 /** 3421 * iavf_check_reset_complete - check that VF reset is complete 3422 * @hw: pointer to hw struct 3423 * 3424 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 3425 **/ 3426 static int iavf_check_reset_complete(struct iavf_hw *hw) 3427 { 3428 u32 rstat; 3429 int i; 3430 3431 for (i = 0; i < 100; i++) { 3432 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 3433 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3434 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 3435 (rstat == VIRTCHNL_VFR_COMPLETED)) 3436 return 0; 3437 usleep_range(10, 20); 3438 } 3439 return -EBUSY; 3440 } 3441 3442 /** 3443 * iavf_process_config - Process the config information we got from the PF 3444 * @adapter: board private structure 3445 * 3446 * Verify that we have a valid config struct, and set up our netdev features 3447 * and our VSI struct. 
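 *
 * Returns 0 on success. Returns -ENODEV if no LAN VSI is found or if the
 * PF handed back a different number of queue pairs than we requested (in
 * that case a reset is scheduled so the new count can take effect).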
3448 **/ 3449 int iavf_process_config(struct iavf_adapter *adapter) 3450 { 3451 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3452 int i, num_req_queues = adapter->num_req_queues; 3453 struct net_device *netdev = adapter->netdev; 3454 struct iavf_vsi *vsi = &adapter->vsi; 3455 netdev_features_t hw_enc_features; 3456 netdev_features_t hw_features; 3457 3458 /* got VF config message back from PF, now we can parse it */ 3459 for (i = 0; i < vfres->num_vsis; i++) { 3460 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 3461 adapter->vsi_res = &vfres->vsi_res[i]; 3462 } 3463 if (!adapter->vsi_res) { 3464 dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 3465 return -ENODEV; 3466 } 3467 3468 if (num_req_queues && 3469 num_req_queues != adapter->vsi_res->num_queue_pairs) { 3470 /* Problem. The PF gave us fewer queues than what we had 3471 * negotiated in our request. Need a reset to see if we can't 3472 * get back to a working state. 3473 */ 3474 dev_err(&adapter->pdev->dev, 3475 "Requested %d queues, but PF only gave us %d.\n", 3476 num_req_queues, 3477 adapter->vsi_res->num_queue_pairs); 3478 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 3479 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 3480 iavf_schedule_reset(adapter); 3481 return -ENODEV; 3482 } 3483 adapter->num_req_queues = 0; 3484 3485 hw_enc_features = NETIF_F_SG | 3486 NETIF_F_IP_CSUM | 3487 NETIF_F_IPV6_CSUM | 3488 NETIF_F_HIGHDMA | 3489 NETIF_F_SOFT_FEATURES | 3490 NETIF_F_TSO | 3491 NETIF_F_TSO_ECN | 3492 NETIF_F_TSO6 | 3493 NETIF_F_SCTP_CRC | 3494 NETIF_F_RXHASH | 3495 NETIF_F_RXCSUM | 3496 0; 3497 3498 /* advertise to stack only if offloads for encapsulated packets is 3499 * supported 3500 */ 3501 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 3502 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 3503 NETIF_F_GSO_GRE | 3504 NETIF_F_GSO_GRE_CSUM | 3505 NETIF_F_GSO_IPXIP4 | 3506 NETIF_F_GSO_IPXIP6 | 3507 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3508 NETIF_F_GSO_PARTIAL | 3509 0; 3510 3511 if (!(vfres->vf_cap_flags & 3512 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 3513 netdev->gso_partial_features |= 3514 NETIF_F_GSO_UDP_TUNNEL_CSUM; 3515 3516 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 3517 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 3518 netdev->hw_enc_features |= hw_enc_features; 3519 } 3520 /* record features VLANs can make use of */ 3521 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 3522 3523 /* Write features and hw_features separately to avoid polluting 3524 * with, or dropping, features that are set when we registered. 3525 */ 3526 hw_features = hw_enc_features; 3527 3528 /* Enable VLAN features if supported */ 3529 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3530 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3531 NETIF_F_HW_VLAN_CTAG_RX); 3532 /* Enable cloud filter if ADQ is supported */ 3533 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 3534 hw_features |= NETIF_F_HW_TC; 3535 3536 netdev->hw_features |= hw_features; 3537 3538 netdev->features |= hw_features; 3539 3540 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3541 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3542 3543 netdev->priv_flags |= IFF_UNICAST_FLT; 3544 3545 /* Do not turn on offloads when they are requested to be turned off. 3546 * TSO needs minimum 576 bytes to work correctly. 
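	 * For example, with an MTU below 576 both NETIF_F_TSO and
	 * NETIF_F_TSO6 are cleared here even if user space asked for them.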
3547 */ 3548 if (netdev->wanted_features) { 3549 if (!(netdev->wanted_features & NETIF_F_TSO) || 3550 netdev->mtu < 576) 3551 netdev->features &= ~NETIF_F_TSO; 3552 if (!(netdev->wanted_features & NETIF_F_TSO6) || 3553 netdev->mtu < 576) 3554 netdev->features &= ~NETIF_F_TSO6; 3555 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 3556 netdev->features &= ~NETIF_F_TSO_ECN; 3557 if (!(netdev->wanted_features & NETIF_F_GRO)) 3558 netdev->features &= ~NETIF_F_GRO; 3559 if (!(netdev->wanted_features & NETIF_F_GSO)) 3560 netdev->features &= ~NETIF_F_GSO; 3561 } 3562 3563 adapter->vsi.id = adapter->vsi_res->vsi_id; 3564 3565 adapter->vsi.back = adapter; 3566 adapter->vsi.base_vector = 1; 3567 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; 3568 vsi->netdev = adapter->netdev; 3569 vsi->qs_handle = adapter->vsi_res->qset_handle; 3570 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 3571 adapter->rss_key_size = vfres->rss_key_size; 3572 adapter->rss_lut_size = vfres->rss_lut_size; 3573 } else { 3574 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 3575 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 3576 } 3577 3578 return 0; 3579 } 3580 3581 /** 3582 * iavf_init_task - worker thread to perform delayed initialization 3583 * @work: pointer to work_struct containing our data 3584 * 3585 * This task completes the work that was begun in probe. Due to the nature 3586 * of VF-PF communications, we may need to wait tens of milliseconds to get 3587 * responses back from the PF. Rather than busy-wait in probe and bog down the 3588 * whole system, we'll do it in a task so we can sleep. 3589 * This task only runs during driver init. Once we've established 3590 * communications with the PF driver and set up our netdev, the watchdog 3591 * takes over. 3592 **/ 3593 static void iavf_init_task(struct work_struct *work) 3594 { 3595 struct iavf_adapter *adapter = container_of(work, 3596 struct iavf_adapter, 3597 init_task.work); 3598 struct iavf_hw *hw = &adapter->hw; 3599 3600 switch (adapter->state) { 3601 case __IAVF_STARTUP: 3602 if (iavf_startup(adapter) < 0) 3603 goto init_failed; 3604 break; 3605 case __IAVF_INIT_VERSION_CHECK: 3606 if (iavf_init_version_check(adapter) < 0) 3607 goto init_failed; 3608 break; 3609 case __IAVF_INIT_GET_RESOURCES: 3610 if (iavf_init_get_resources(adapter) < 0) 3611 goto init_failed; 3612 return; 3613 default: 3614 goto init_failed; 3615 } 3616 3617 queue_delayed_work(iavf_wq, &adapter->init_task, 3618 msecs_to_jiffies(30)); 3619 return; 3620 init_failed: 3621 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 3622 dev_err(&adapter->pdev->dev, 3623 "Failed to communicate with PF; waiting before retry\n"); 3624 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3625 iavf_shutdown_adminq(hw); 3626 adapter->state = __IAVF_STARTUP; 3627 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); 3628 return; 3629 } 3630 queue_delayed_work(iavf_wq, &adapter->init_task, HZ); 3631 } 3632 3633 /** 3634 * iavf_shutdown - Shutdown the device in preparation for a reboot 3635 * @pdev: pci device structure 3636 **/ 3637 static void iavf_shutdown(struct pci_dev *pdev) 3638 { 3639 struct net_device *netdev = pci_get_drvdata(pdev); 3640 struct iavf_adapter *adapter = netdev_priv(netdev); 3641 3642 netif_device_detach(netdev); 3643 3644 if (netif_running(netdev)) 3645 iavf_close(netdev); 3646 3647 /* Prevent the watchdog from running. 
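	 * Once the state is __IAVF_REMOVE the watchdog task returns
	 * without rescheduling itself.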
*/ 3648 adapter->state = __IAVF_REMOVE; 3649 adapter->aq_required = 0; 3650 3651 #ifdef CONFIG_PM 3652 pci_save_state(pdev); 3653 3654 #endif 3655 pci_disable_device(pdev); 3656 } 3657 3658 /** 3659 * iavf_probe - Device Initialization Routine 3660 * @pdev: PCI device information struct 3661 * @ent: entry in iavf_pci_tbl 3662 * 3663 * Returns 0 on success, negative on failure 3664 * 3665 * iavf_probe initializes an adapter identified by a pci_dev structure. 3666 * The OS initialization, configuring of the adapter private structure, 3667 * and a hardware reset occur. 3668 **/ 3669 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3670 { 3671 struct net_device *netdev; 3672 struct iavf_adapter *adapter = NULL; 3673 struct iavf_hw *hw = NULL; 3674 int err; 3675 3676 err = pci_enable_device(pdev); 3677 if (err) 3678 return err; 3679 3680 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 3681 if (err) { 3682 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3683 if (err) { 3684 dev_err(&pdev->dev, 3685 "DMA configuration failed: 0x%x\n", err); 3686 goto err_dma; 3687 } 3688 } 3689 3690 err = pci_request_regions(pdev, iavf_driver_name); 3691 if (err) { 3692 dev_err(&pdev->dev, 3693 "pci_request_regions failed 0x%x\n", err); 3694 goto err_pci_reg; 3695 } 3696 3697 pci_enable_pcie_error_reporting(pdev); 3698 3699 pci_set_master(pdev); 3700 3701 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 3702 IAVF_MAX_REQ_QUEUES); 3703 if (!netdev) { 3704 err = -ENOMEM; 3705 goto err_alloc_etherdev; 3706 } 3707 3708 SET_NETDEV_DEV(netdev, &pdev->dev); 3709 3710 pci_set_drvdata(pdev, netdev); 3711 adapter = netdev_priv(netdev); 3712 3713 adapter->netdev = netdev; 3714 adapter->pdev = pdev; 3715 3716 hw = &adapter->hw; 3717 hw->back = adapter; 3718 3719 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3720 adapter->state = __IAVF_STARTUP; 3721 3722 /* Call save state here because it relies on the adapter struct. 
*/ 3723 pci_save_state(pdev); 3724 3725 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3726 pci_resource_len(pdev, 0)); 3727 if (!hw->hw_addr) { 3728 err = -EIO; 3729 goto err_ioremap; 3730 } 3731 hw->vendor_id = pdev->vendor; 3732 hw->device_id = pdev->device; 3733 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 3734 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3735 hw->subsystem_device_id = pdev->subsystem_device; 3736 hw->bus.device = PCI_SLOT(pdev->devfn); 3737 hw->bus.func = PCI_FUNC(pdev->devfn); 3738 hw->bus.bus_id = pdev->bus->number; 3739 3740 /* set up the locks for the AQ, do this only once in probe 3741 * and destroy them only once in remove 3742 */ 3743 mutex_init(&hw->aq.asq_mutex); 3744 mutex_init(&hw->aq.arq_mutex); 3745 3746 spin_lock_init(&adapter->mac_vlan_list_lock); 3747 spin_lock_init(&adapter->cloud_filter_list_lock); 3748 3749 INIT_LIST_HEAD(&adapter->mac_filter_list); 3750 INIT_LIST_HEAD(&adapter->vlan_filter_list); 3751 INIT_LIST_HEAD(&adapter->cloud_filter_list); 3752 3753 INIT_WORK(&adapter->reset_task, iavf_reset_task); 3754 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 3755 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 3756 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 3757 INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task); 3758 queue_delayed_work(iavf_wq, &adapter->init_task, 3759 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 3760 3761 /* Setup the wait queue for indicating transition to down status */ 3762 init_waitqueue_head(&adapter->down_waitqueue); 3763 3764 return 0; 3765 3766 err_ioremap: 3767 free_netdev(netdev); 3768 err_alloc_etherdev: 3769 pci_release_regions(pdev); 3770 err_pci_reg: 3771 err_dma: 3772 pci_disable_device(pdev); 3773 return err; 3774 } 3775 3776 #ifdef CONFIG_PM 3777 /** 3778 * iavf_suspend - Power management suspend routine 3779 * @pdev: PCI device information struct 3780 * @state: unused 3781 * 3782 * Called when the system (VM) is entering sleep/suspend. 3783 **/ 3784 static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) 3785 { 3786 struct net_device *netdev = pci_get_drvdata(pdev); 3787 struct iavf_adapter *adapter = netdev_priv(netdev); 3788 int retval = 0; 3789 3790 netif_device_detach(netdev); 3791 3792 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, 3793 &adapter->crit_section)) 3794 usleep_range(500, 1000); 3795 3796 if (netif_running(netdev)) { 3797 rtnl_lock(); 3798 iavf_down(adapter); 3799 rtnl_unlock(); 3800 } 3801 iavf_free_misc_irq(adapter); 3802 iavf_reset_interrupt_capability(adapter); 3803 3804 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); 3805 3806 retval = pci_save_state(pdev); 3807 if (retval) 3808 return retval; 3809 3810 pci_disable_device(pdev); 3811 3812 return 0; 3813 } 3814 3815 /** 3816 * iavf_resume - Power management resume routine 3817 * @pdev: PCI device information struct 3818 * 3819 * Called when the system (VM) is resumed from sleep/suspend. 3820 **/ 3821 static int iavf_resume(struct pci_dev *pdev) 3822 { 3823 struct iavf_adapter *adapter = pci_get_drvdata(pdev); 3824 struct net_device *netdev = adapter->netdev; 3825 u32 err; 3826 3827 pci_set_power_state(pdev, PCI_D0); 3828 pci_restore_state(pdev); 3829 /* pci_restore_state clears dev->state_saved so call 3830 * pci_save_state to restore it. 
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static struct pci_driver iavf_driver = {
	.name     = iavf_driver_name,
	.id_table = iavf_pci_tbl,
	.probe    = iavf_probe,
	.remove   = iavf_remove,
#ifdef CONFIG_PM
	.suspend  = iavf_suspend,
	.resume   = iavf_resume,
#endif
	.shutdown = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s - version %s\n", iavf_driver_string,
		iavf_driver_version);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}

	ret = pci_register_driver(&iavf_driver);
	if (ret)
		/* don't leak the workqueue if registration fails */
		destroy_workqueue(iavf_wq);

	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */