// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;

int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_RDMA_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	return -EIO;
}

int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	return -EIO;
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem - wrapper for DMA memory freeing
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem - virt memory alloc wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
					struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

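/* These DMA and virtual memory wrappers (alloc above, free below) appear to
 * back the OS-dependent allocation hooks that the shared admin-queue code
 * uses when it creates the send/receive queue rings and their buffers; the
 * actual call sites live outside this file (presumably iavf_adminq.c).
 */
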
/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	kfree(mem->va);
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(adapter->wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 **/
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(adapter->wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

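/* Illustrative example of the mapping performed by iavf_map_rings_to_vectors()
 * below: with 8 active queue pairs but only 4 traffic vectors, the loop walks
 * the ring pairs in order and wraps the vector index, so vector 0 services
 * ring pairs 0 and 4, vector 1 services 1 and 5, vector 2 services 2 and 6,
 * and vector 3 services 3 and 7. When there are at least as many vectors as
 * ring pairs, each pair gets its own vector.
 */
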
/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

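/* To summarize the Rx buffer sizing above (for the common 4K-page case):
 * legacy Rx always keeps 2048-byte buffers; with build_skb enabled the
 * default grows to 3072 bytes so jumbo frames make better use of the
 * order-1 page, and a standard 1500-byte MTU drops back to a 1536-byte
 * buffer (minus NET_IP_ALIGN) provided a 2K buffer with padding is not too
 * small on this architecture. On pages of 8K or larger the 2048 default is
 * kept unchanged.
 */
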
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->state = IAVF_VLAN_ADD;
		adapter->num_vlan_filters++;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->state = IAVF_VLAN_REMOVE;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f;

	/* re-add all VLAN filters */
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_INACTIVE)
			f->state = IAVF_VLAN_ADD;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
	 * never been a limit on the VF driver side
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Do not track VLAN 0 filter, always added by the PF on VF init */
	if (!vid)
		return 0;

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* We do not track VLAN 0 filter */
	if (!vid)
		return 0;

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
				    const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *new_f;
	struct iavf_mac_filter *old_f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	new_f = iavf_add_filter(adapter, new_mac);
	if (!new_f) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return -ENOMEM;
	}

	old_f = iavf_find_filter(adapter, hw->mac.addr);
	if (old_f) {
		old_f->is_primary = false;
		old_f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	/* Always send the request to add if changing primary MAC,
	 * even if filter is already present on the list
	 */
	new_f->is_primary = true;
	new_f->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	ether_addr_copy(hw->mac.addr, new_mac);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
	return 0;
}

/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Returns true on success, false on failure
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
				    const u8 *macaddr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;
	bool ret = false;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, macaddr);

	if (!f || (!f->add && f->add_handled))
		ret = true;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	return ret;
}

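/* iavf_set_mac() below pairs with iavf_is_mac_set_handled() above: the ndo
 * callback queues the primary MAC change for the PF and then sleeps on
 * adapter->vc_waitqueue for up to 2.5 seconds. The virtchnl completion path
 * (which lives outside this file) is expected to mark the filter as
 * add_handled and wake that waitqueue once the PF answers.
 */
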
/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = iavf_replace_primary_mac(adapter, addr->sa_data);

	if (ret)
		return ret;

	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
					       iavf_is_mac_set_handled(netdev, addr->sa_data),
					       msecs_to_jiffies(2500));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout.
	 * else it means we got response for set MAC from PF,
	 * check if netdev MAC was updated to requested MAC,
	 * if yes then set MAC succeeded otherwise it failed return -EACCES
	 */
	if (ret < 0)
		return ret;

	if (!ret)
		return -EAGAIN;

	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
 * yet and mark other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
				 list) {
		if (f->add) {
			list_del(&f->list);
			kfree(f);
		} else {
			f->remove = true;
		}
	}

	/* disable all VLAN filters */
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list)
		vlf->state = IAVF_VLAN_DISABLE;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
 * mark other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		if (cf->add) {
			list_del(&cf->list);
			kfree(cf);
			adapter->num_cloud_filters--;
		} else {
			cf->del = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir, *fdirtmp;

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
				 list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			list_del(&fdir->list);
			kfree(fdir);
			adapter->fdir_active_fltr--;
		} else {
			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
}

/**
 * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
	struct iavf_adv_rss *rss, *rsstmp;

	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			list_del(&rss->list);
			kfree(rss);
		} else {
			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location the ring specific
 * flags.
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));


	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);

	}

	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				     adapter->rss_lut, adapter->rss_lut_size);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	return 0;

}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{

	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

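/* Example of the default LUT produced by iavf_fill_rss_lut() above: with 4
 * active queues and a 64-entry LUT the table becomes 0, 1, 2, 3, 0, 1, 2, 3,
 * ..., so hashed flows are spread evenly across the active queues until the
 * table is overridden from user space.
 */
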
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

	return iavf_config_rss(adapter);
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}

/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
		return iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
2036 */
2037 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2038 return 0;
2039 }
2040 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2041 iavf_get_hena(adapter);
2042 return 0;
2043 }
2044 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2045 iavf_set_hena(adapter);
2046 return 0;
2047 }
2048 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2049 iavf_set_rss_key(adapter);
2050 return 0;
2051 }
2052 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2053 iavf_set_rss_lut(adapter);
2054 return 0;
2055 }
2056
2057 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2058 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2059 FLAG_VF_MULTICAST_PROMISC);
2060 return 0;
2061 }
2062
2063 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2064 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2065 return 0;
2066 }
2067 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2068 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2069 iavf_set_promiscuous(adapter, 0);
2070 return 0;
2071 }
2072
2073 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2074 iavf_enable_channels(adapter);
2075 return 0;
2076 }
2077
2078 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2079 iavf_disable_channels(adapter);
2080 return 0;
2081 }
2082 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2083 iavf_add_cloud_filter(adapter);
2084 return 0;
2085 }
2086
2087 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2088 iavf_del_cloud_filter(adapter);
2089 return 0;
2090 }
2099 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2100 iavf_add_fdir_filter(adapter);
2101 return 0;
2102 }
2103 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2104 iavf_del_fdir_filter(adapter);
2105 return 0;
2106 }
2107 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2108 iavf_add_adv_rss_cfg(adapter);
2109 return 0;
2110 }
2111 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2112 iavf_del_adv_rss_cfg(adapter);
2113 return 0;
2114 }
2115 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2116 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2117 return 0;
2118 }
2119 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2120 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2121 return 0;
2122 }
2123 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2124 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2125 return 0;
2126 }
2127 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2128 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2129 return 0;
2130 }
2131 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2132 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2133 return 0;
2134 }
2135 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2136 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2137 return 0;
2138 }
2139 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2140 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2141 return 0;
2142 }
2143 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2144 iavf_enable_vlan_insertion_v2(adapter,
ETH_P_8021AD); 2145 return 0; 2146 } 2147 2148 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { 2149 iavf_request_stats(adapter); 2150 return 0; 2151 } 2152 2153 return -EAGAIN; 2154 } 2155 2156 /** 2157 * iavf_set_vlan_offload_features - set VLAN offload configuration 2158 * @adapter: board private structure 2159 * @prev_features: previous features used for comparison 2160 * @features: updated features used for configuration 2161 * 2162 * Set the aq_required bit(s) based on the requested features passed in to 2163 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule 2164 * the watchdog if any changes are requested to expedite the request via 2165 * virtchnl. 2166 **/ 2167 static void 2168 iavf_set_vlan_offload_features(struct iavf_adapter *adapter, 2169 netdev_features_t prev_features, 2170 netdev_features_t features) 2171 { 2172 bool enable_stripping = true, enable_insertion = true; 2173 u16 vlan_ethertype = 0; 2174 u64 aq_required = 0; 2175 2176 /* keep cases separate because one ethertype for offloads can be 2177 * disabled at the same time as another is disabled, so check for an 2178 * enabled ethertype first, then check for disabled. Default to 2179 * ETH_P_8021Q so an ethertype is specified if disabling insertion and 2180 * stripping. 2181 */ 2182 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 2183 vlan_ethertype = ETH_P_8021AD; 2184 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 2185 vlan_ethertype = ETH_P_8021Q; 2186 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 2187 vlan_ethertype = ETH_P_8021AD; 2188 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 2189 vlan_ethertype = ETH_P_8021Q; 2190 else 2191 vlan_ethertype = ETH_P_8021Q; 2192 2193 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 2194 enable_stripping = false; 2195 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 2196 enable_insertion = false; 2197 2198 if (VLAN_ALLOWED(adapter)) { 2199 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN 2200 * stripping via virtchnl. VLAN insertion can be toggled on the 2201 * netdev, but it doesn't require a virtchnl message 2202 */ 2203 if (enable_stripping) 2204 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; 2205 else 2206 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; 2207 2208 } else if (VLAN_V2_ALLOWED(adapter)) { 2209 switch (vlan_ethertype) { 2210 case ETH_P_8021Q: 2211 if (enable_stripping) 2212 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; 2213 else 2214 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; 2215 2216 if (enable_insertion) 2217 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; 2218 else 2219 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; 2220 break; 2221 case ETH_P_8021AD: 2222 if (enable_stripping) 2223 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; 2224 else 2225 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; 2226 2227 if (enable_insertion) 2228 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; 2229 else 2230 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; 2231 break; 2232 } 2233 } 2234 2235 if (aq_required) { 2236 adapter->aq_required |= aq_required; 2237 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 2238 } 2239 } 2240 2241 /** 2242 * iavf_startup - first step of driver startup 2243 * @adapter: board private structure 2244 * 2245 * Function process __IAVF_STARTUP driver state. 
2246 * When success the state is changed to __IAVF_INIT_VERSION_CHECK 2247 * when fails the state is changed to __IAVF_INIT_FAILED 2248 **/ 2249 static void iavf_startup(struct iavf_adapter *adapter) 2250 { 2251 struct pci_dev *pdev = adapter->pdev; 2252 struct iavf_hw *hw = &adapter->hw; 2253 enum iavf_status status; 2254 int ret; 2255 2256 WARN_ON(adapter->state != __IAVF_STARTUP); 2257 2258 /* driver loaded, probe complete */ 2259 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2260 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2261 status = iavf_set_mac_type(hw); 2262 if (status) { 2263 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); 2264 goto err; 2265 } 2266 2267 ret = iavf_check_reset_complete(hw); 2268 if (ret) { 2269 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", 2270 ret); 2271 goto err; 2272 } 2273 hw->aq.num_arq_entries = IAVF_AQ_LEN; 2274 hw->aq.num_asq_entries = IAVF_AQ_LEN; 2275 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 2276 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE; 2277 2278 status = iavf_init_adminq(hw); 2279 if (status) { 2280 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", 2281 status); 2282 goto err; 2283 } 2284 ret = iavf_send_api_ver(adapter); 2285 if (ret) { 2286 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret); 2287 iavf_shutdown_adminq(hw); 2288 goto err; 2289 } 2290 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); 2291 return; 2292 err: 2293 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2294 } 2295 2296 /** 2297 * iavf_init_version_check - second step of driver startup 2298 * @adapter: board private structure 2299 * 2300 * Function process __IAVF_INIT_VERSION_CHECK driver state. 2301 * When success the state is changed to __IAVF_INIT_GET_RESOURCES 2302 * when fails the state is changed to __IAVF_INIT_FAILED 2303 **/ 2304 static void iavf_init_version_check(struct iavf_adapter *adapter) 2305 { 2306 struct pci_dev *pdev = adapter->pdev; 2307 struct iavf_hw *hw = &adapter->hw; 2308 int err = -EAGAIN; 2309 2310 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK); 2311 2312 if (!iavf_asq_done(hw)) { 2313 dev_err(&pdev->dev, "Admin queue command never completed\n"); 2314 iavf_shutdown_adminq(hw); 2315 iavf_change_state(adapter, __IAVF_STARTUP); 2316 goto err; 2317 } 2318 2319 /* aq msg sent, awaiting reply */ 2320 err = iavf_verify_api_ver(adapter); 2321 if (err) { 2322 if (err == -EALREADY) 2323 err = iavf_send_api_ver(adapter); 2324 else 2325 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", 2326 adapter->pf_version.major, 2327 adapter->pf_version.minor, 2328 VIRTCHNL_VERSION_MAJOR, 2329 VIRTCHNL_VERSION_MINOR); 2330 goto err; 2331 } 2332 err = iavf_send_vf_config_msg(adapter); 2333 if (err) { 2334 dev_err(&pdev->dev, "Unable to send config request (%d)\n", 2335 err); 2336 goto err; 2337 } 2338 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); 2339 return; 2340 err: 2341 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2342 } 2343 2344 /** 2345 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES 2346 * @adapter: board private structure 2347 */ 2348 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) 2349 { 2350 int i, num_req_queues = adapter->num_req_queues; 2351 struct iavf_vsi *vsi = &adapter->vsi; 2352 2353 for (i = 0; i < adapter->vf_res->num_vsis; i++) { 2354 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) 2355 adapter->vsi_res = &adapter->vf_res->vsi_res[i]; 2356 } 2357 if (!adapter->vsi_res) { 2358 
dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); 2359 return -ENODEV; 2360 } 2361 2362 if (num_req_queues && 2363 num_req_queues > adapter->vsi_res->num_queue_pairs) { 2364 /* Problem. The PF gave us fewer queues than what we had 2365 * negotiated in our request. Need a reset to see if we can't 2366 * get back to a working state. 2367 */ 2368 dev_err(&adapter->pdev->dev, 2369 "Requested %d queues, but PF only gave us %d.\n", 2370 num_req_queues, 2371 adapter->vsi_res->num_queue_pairs); 2372 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED; 2373 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; 2374 iavf_schedule_reset(adapter); 2375 2376 return -EAGAIN; 2377 } 2378 adapter->num_req_queues = 0; 2379 adapter->vsi.id = adapter->vsi_res->vsi_id; 2380 2381 adapter->vsi.back = adapter; 2382 adapter->vsi.base_vector = 1; 2383 vsi->netdev = adapter->netdev; 2384 vsi->qs_handle = adapter->vsi_res->qset_handle; 2385 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 2386 adapter->rss_key_size = adapter->vf_res->rss_key_size; 2387 adapter->rss_lut_size = adapter->vf_res->rss_lut_size; 2388 } else { 2389 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; 2390 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; 2391 } 2392 2393 return 0; 2394 } 2395 2396 /** 2397 * iavf_init_get_resources - third step of driver startup 2398 * @adapter: board private structure 2399 * 2400 * Function process __IAVF_INIT_GET_RESOURCES driver state and 2401 * finishes driver initialization procedure. 2402 * When success the state is changed to __IAVF_DOWN 2403 * when fails the state is changed to __IAVF_INIT_FAILED 2404 **/ 2405 static void iavf_init_get_resources(struct iavf_adapter *adapter) 2406 { 2407 struct pci_dev *pdev = adapter->pdev; 2408 struct iavf_hw *hw = &adapter->hw; 2409 int err; 2410 2411 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); 2412 /* aq msg sent, awaiting reply */ 2413 if (!adapter->vf_res) { 2414 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, 2415 GFP_KERNEL); 2416 if (!adapter->vf_res) { 2417 err = -ENOMEM; 2418 goto err; 2419 } 2420 } 2421 err = iavf_get_vf_config(adapter); 2422 if (err == -EALREADY) { 2423 err = iavf_send_vf_config_msg(adapter); 2424 goto err; 2425 } else if (err == -EINVAL) { 2426 /* We only get -EINVAL if the device is in a very bad 2427 * state or if we've been disabled for previous bad 2428 * behavior. Either way, we're done now. 2429 */ 2430 iavf_shutdown_adminq(hw); 2431 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); 2432 return; 2433 } 2434 if (err) { 2435 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); 2436 goto err_alloc; 2437 } 2438 2439 err = iavf_parse_vf_resource_msg(adapter); 2440 if (err) { 2441 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", 2442 err); 2443 goto err_alloc; 2444 } 2445 /* Some features require additional messages to negotiate extended 2446 * capabilities. These are processed in sequence by the 2447 * __IAVF_INIT_EXTENDED_CAPS driver state. 
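 * The only extended capability currently processed in that state is VLAN V2:
 * a send step (iavf_init_send_offload_vlan_v2_caps()) followed by a receive
 * step (iavf_init_recv_offload_vlan_v2_caps()); see
 * iavf_init_process_extended_caps() below.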
2448 */
2449 adapter->extended_caps = IAVF_EXTENDED_CAPS;
2450
2451 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2452 return;
2453
2454 err_alloc:
2455 kfree(adapter->vf_res);
2456 adapter->vf_res = NULL;
2457 err:
2458 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2459 }
2460
2461 /**
2462 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2463 * @adapter: board private structure
2464 *
2465 * Function processes send of the extended VLAN V2 capability message to the
2466 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2467 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2468 */
2469 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2470 {
2471 int ret;
2472
2473 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2474
2475 ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2476 if (ret == -EOPNOTSUPP) {
2477 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2478 * we did not send the capability exchange message and do not
2479 * expect a response.
2480 */
2481 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2482 }
2483
2484 /* The send step is complete either way, so move on to the next step */
2485 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2486 }
2487
2488 /**
2489 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2490 * @adapter: board private structure
2491 *
2492 * Function processes receipt of the extended VLAN V2 capability message from
2493 * the PF.
2494 **/
2495 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2496 {
2497 int ret;
2498
2499 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2500
2501 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2502
2503 ret = iavf_get_vf_vlan_v2_caps(adapter);
2504 if (ret)
2505 goto err;
2506
2507 /* We've processed receipt of the VLAN V2 caps message */
2508 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2509 return;
2510 err:
2511 /* We didn't receive a reply. Make sure we try sending again when
2512 * __IAVF_INIT_FAILED attempts to recover.
2513 */
2514 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2515 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2516 }
2517
2518 /**
2519 * iavf_init_process_extended_caps - Part of driver startup
2520 * @adapter: board private structure
2521 *
2522 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2523 * handles negotiating capabilities for features which require an additional
2524 * message.
2525 *
2526 * Once all extended capabilities exchanges are finished, the driver will
2527 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2528 */ 2529 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) 2530 { 2531 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS); 2532 2533 /* Process capability exchange for VLAN V2 */ 2534 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) { 2535 iavf_init_send_offload_vlan_v2_caps(adapter); 2536 return; 2537 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) { 2538 iavf_init_recv_offload_vlan_v2_caps(adapter); 2539 return; 2540 } 2541 2542 /* When we reach here, no further extended capabilities exchanges are 2543 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER 2544 */ 2545 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); 2546 } 2547 2548 /** 2549 * iavf_init_config_adapter - last part of driver startup 2550 * @adapter: board private structure 2551 * 2552 * After all the supported capabilities are negotiated, then the 2553 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. 2554 */ 2555 static void iavf_init_config_adapter(struct iavf_adapter *adapter) 2556 { 2557 struct net_device *netdev = adapter->netdev; 2558 struct pci_dev *pdev = adapter->pdev; 2559 int err; 2560 2561 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); 2562 2563 if (iavf_process_config(adapter)) 2564 goto err; 2565 2566 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2567 2568 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; 2569 2570 netdev->netdev_ops = &iavf_netdev_ops; 2571 iavf_set_ethtool_ops(netdev); 2572 netdev->watchdog_timeo = 5 * HZ; 2573 2574 /* MTU range: 68 - 9710 */ 2575 netdev->min_mtu = ETH_MIN_MTU; 2576 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; 2577 2578 if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 2579 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", 2580 adapter->hw.mac.addr); 2581 eth_hw_addr_random(netdev); 2582 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); 2583 } else { 2584 eth_hw_addr_set(netdev, adapter->hw.mac.addr); 2585 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); 2586 } 2587 2588 adapter->tx_desc_count = IAVF_DEFAULT_TXD; 2589 adapter->rx_desc_count = IAVF_DEFAULT_RXD; 2590 err = iavf_init_interrupt_scheme(adapter); 2591 if (err) 2592 goto err_sw_init; 2593 iavf_map_rings_to_vectors(adapter); 2594 if (adapter->vf_res->vf_cap_flags & 2595 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 2596 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE; 2597 2598 err = iavf_request_misc_irq(adapter); 2599 if (err) 2600 goto err_sw_init; 2601 2602 netif_carrier_off(netdev); 2603 adapter->link_up = false; 2604 2605 /* set the semaphore to prevent any callbacks after device registration 2606 * up to time when state of driver will be set to __IAVF_DOWN 2607 */ 2608 rtnl_lock(); 2609 if (!adapter->netdev_registered) { 2610 err = register_netdevice(netdev); 2611 if (err) { 2612 rtnl_unlock(); 2613 goto err_register; 2614 } 2615 } 2616 2617 adapter->netdev_registered = true; 2618 2619 netif_tx_stop_all_queues(netdev); 2620 if (CLIENT_ALLOWED(adapter)) { 2621 err = iavf_lan_add_device(adapter); 2622 if (err) 2623 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", 2624 err); 2625 } 2626 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); 2627 if (netdev->features & NETIF_F_GRO) 2628 dev_info(&pdev->dev, "GRO is enabled\n"); 2629 2630 iavf_change_state(adapter, __IAVF_DOWN); 2631 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2632 rtnl_unlock(); 2633 2634 iavf_misc_irq_enable(adapter); 2635 wake_up(&adapter->down_waitqueue); 2636 2637 
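/* Allocate the RSS key and lookup table using the sizes negotiated with the
 * PF (see iavf_parse_vf_resource_msg()). If RSS is configured through the
 * admin queue (RSS_AQ), the actual programming is deferred to the watchdog
 * task via IAVF_FLAG_AQ_CONFIGURE_RSS; otherwise iavf_init_rss() programs it
 * right away.
 */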
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); 2638 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); 2639 if (!adapter->rss_key || !adapter->rss_lut) { 2640 err = -ENOMEM; 2641 goto err_mem; 2642 } 2643 if (RSS_AQ(adapter)) 2644 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; 2645 else 2646 iavf_init_rss(adapter); 2647 2648 if (VLAN_V2_ALLOWED(adapter)) 2649 /* request initial VLAN offload settings */ 2650 iavf_set_vlan_offload_features(adapter, 0, netdev->features); 2651 2652 return; 2653 err_mem: 2654 iavf_free_rss(adapter); 2655 err_register: 2656 iavf_free_misc_irq(adapter); 2657 err_sw_init: 2658 iavf_reset_interrupt_capability(adapter); 2659 err: 2660 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2661 } 2662 2663 /** 2664 * iavf_watchdog_task - Periodic call-back task 2665 * @work: pointer to work_struct 2666 **/ 2667 static void iavf_watchdog_task(struct work_struct *work) 2668 { 2669 struct iavf_adapter *adapter = container_of(work, 2670 struct iavf_adapter, 2671 watchdog_task.work); 2672 struct iavf_hw *hw = &adapter->hw; 2673 u32 reg_val; 2674 2675 if (!mutex_trylock(&adapter->crit_lock)) { 2676 if (adapter->state == __IAVF_REMOVE) 2677 return; 2678 2679 goto restart_watchdog; 2680 } 2681 2682 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && 2683 adapter->netdev_registered && 2684 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && 2685 rtnl_trylock()) { 2686 netdev_update_features(adapter->netdev); 2687 rtnl_unlock(); 2688 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; 2689 } 2690 2691 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2692 iavf_change_state(adapter, __IAVF_COMM_FAILED); 2693 2694 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2695 adapter->aq_required = 0; 2696 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2697 mutex_unlock(&adapter->crit_lock); 2698 queue_work(adapter->wq, &adapter->reset_task); 2699 return; 2700 } 2701 2702 switch (adapter->state) { 2703 case __IAVF_STARTUP: 2704 iavf_startup(adapter); 2705 mutex_unlock(&adapter->crit_lock); 2706 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2707 msecs_to_jiffies(30)); 2708 return; 2709 case __IAVF_INIT_VERSION_CHECK: 2710 iavf_init_version_check(adapter); 2711 mutex_unlock(&adapter->crit_lock); 2712 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2713 msecs_to_jiffies(30)); 2714 return; 2715 case __IAVF_INIT_GET_RESOURCES: 2716 iavf_init_get_resources(adapter); 2717 mutex_unlock(&adapter->crit_lock); 2718 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2719 msecs_to_jiffies(1)); 2720 return; 2721 case __IAVF_INIT_EXTENDED_CAPS: 2722 iavf_init_process_extended_caps(adapter); 2723 mutex_unlock(&adapter->crit_lock); 2724 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2725 msecs_to_jiffies(1)); 2726 return; 2727 case __IAVF_INIT_CONFIG_ADAPTER: 2728 iavf_init_config_adapter(adapter); 2729 mutex_unlock(&adapter->crit_lock); 2730 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2731 msecs_to_jiffies(1)); 2732 return; 2733 case __IAVF_INIT_FAILED: 2734 if (test_bit(__IAVF_IN_REMOVE_TASK, 2735 &adapter->crit_section)) { 2736 /* Do not update the state and do not reschedule 2737 * watchdog task, iavf_remove should handle this state 2738 * as it can loop forever 2739 */ 2740 mutex_unlock(&adapter->crit_lock); 2741 return; 2742 } 2743 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { 2744 dev_err(&adapter->pdev->dev, 2745 "Failed to communicate with PF; waiting before retry\n"); 2746 adapter->flags |= 
IAVF_FLAG_PF_COMMS_FAILED; 2747 iavf_shutdown_adminq(hw); 2748 mutex_unlock(&adapter->crit_lock); 2749 queue_delayed_work(adapter->wq, 2750 &adapter->watchdog_task, (5 * HZ)); 2751 return; 2752 } 2753 /* Try again from failed step*/ 2754 iavf_change_state(adapter, adapter->last_state); 2755 mutex_unlock(&adapter->crit_lock); 2756 queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); 2757 return; 2758 case __IAVF_COMM_FAILED: 2759 if (test_bit(__IAVF_IN_REMOVE_TASK, 2760 &adapter->crit_section)) { 2761 /* Set state to __IAVF_INIT_FAILED and perform remove 2762 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task 2763 * doesn't bring the state back to __IAVF_COMM_FAILED. 2764 */ 2765 iavf_change_state(adapter, __IAVF_INIT_FAILED); 2766 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2767 mutex_unlock(&adapter->crit_lock); 2768 return; 2769 } 2770 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2771 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 2772 if (reg_val == VIRTCHNL_VFR_VFACTIVE || 2773 reg_val == VIRTCHNL_VFR_COMPLETED) { 2774 /* A chance for redemption! */ 2775 dev_err(&adapter->pdev->dev, 2776 "Hardware came out of reset. Attempting reinit.\n"); 2777 /* When init task contacts the PF and 2778 * gets everything set up again, it'll restart the 2779 * watchdog for us. Down, boy. Sit. Stay. Woof. 2780 */ 2781 iavf_change_state(adapter, __IAVF_STARTUP); 2782 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; 2783 } 2784 adapter->aq_required = 0; 2785 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2786 mutex_unlock(&adapter->crit_lock); 2787 queue_delayed_work(adapter->wq, 2788 &adapter->watchdog_task, 2789 msecs_to_jiffies(10)); 2790 return; 2791 case __IAVF_RESETTING: 2792 mutex_unlock(&adapter->crit_lock); 2793 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2794 HZ * 2); 2795 return; 2796 case __IAVF_DOWN: 2797 case __IAVF_DOWN_PENDING: 2798 case __IAVF_TESTING: 2799 case __IAVF_RUNNING: 2800 if (adapter->current_op) { 2801 if (!iavf_asq_done(hw)) { 2802 dev_dbg(&adapter->pdev->dev, 2803 "Admin queue timeout\n"); 2804 iavf_send_api_ver(adapter); 2805 } 2806 } else { 2807 int ret = iavf_process_aq_command(adapter); 2808 2809 /* An error will be returned if no commands were 2810 * processed; use this opportunity to update stats 2811 * if the error isn't -ENOTSUPP 2812 */ 2813 if (ret && ret != -EOPNOTSUPP && 2814 adapter->state == __IAVF_RUNNING) 2815 iavf_request_stats(adapter); 2816 } 2817 if (adapter->state == __IAVF_RUNNING) 2818 iavf_detect_recover_hung(&adapter->vsi); 2819 break; 2820 case __IAVF_REMOVE: 2821 default: 2822 mutex_unlock(&adapter->crit_lock); 2823 return; 2824 } 2825 2826 /* check for hw reset */ 2827 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2828 if (!reg_val) { 2829 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2830 adapter->aq_required = 0; 2831 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2832 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2833 queue_work(adapter->wq, &adapter->reset_task); 2834 mutex_unlock(&adapter->crit_lock); 2835 queue_delayed_work(adapter->wq, 2836 &adapter->watchdog_task, HZ * 2); 2837 return; 2838 } 2839 2840 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 2841 mutex_unlock(&adapter->crit_lock); 2842 restart_watchdog: 2843 if (adapter->state >= __IAVF_DOWN) 2844 queue_work(adapter->wq, &adapter->adminq_task); 2845 if (adapter->aq_required) 2846 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2847 msecs_to_jiffies(20)); 2848 else 2849 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 
2850 HZ * 2); 2851 } 2852 2853 /** 2854 * iavf_disable_vf - disable VF 2855 * @adapter: board private structure 2856 * 2857 * Set communication failed flag and free all resources. 2858 * NOTE: This function is expected to be called with crit_lock being held. 2859 **/ 2860 static void iavf_disable_vf(struct iavf_adapter *adapter) 2861 { 2862 struct iavf_mac_filter *f, *ftmp; 2863 struct iavf_vlan_filter *fv, *fvtmp; 2864 struct iavf_cloud_filter *cf, *cftmp; 2865 2866 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2867 2868 /* We don't use netif_running() because it may be true prior to 2869 * ndo_open() returning, so we can't assume it means all our open 2870 * tasks have finished, since we're not holding the rtnl_lock here. 2871 */ 2872 if (adapter->state == __IAVF_RUNNING) { 2873 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 2874 netif_carrier_off(adapter->netdev); 2875 netif_tx_disable(adapter->netdev); 2876 adapter->link_up = false; 2877 iavf_napi_disable_all(adapter); 2878 iavf_irq_disable(adapter); 2879 iavf_free_traffic_irqs(adapter); 2880 iavf_free_all_tx_resources(adapter); 2881 iavf_free_all_rx_resources(adapter); 2882 } 2883 2884 spin_lock_bh(&adapter->mac_vlan_list_lock); 2885 2886 /* Delete all of the filters */ 2887 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 2888 list_del(&f->list); 2889 kfree(f); 2890 } 2891 2892 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { 2893 list_del(&fv->list); 2894 kfree(fv); 2895 } 2896 adapter->num_vlan_filters = 0; 2897 2898 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2899 2900 spin_lock_bh(&adapter->cloud_filter_list_lock); 2901 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 2902 list_del(&cf->list); 2903 kfree(cf); 2904 adapter->num_cloud_filters--; 2905 } 2906 spin_unlock_bh(&adapter->cloud_filter_list_lock); 2907 2908 iavf_free_misc_irq(adapter); 2909 iavf_reset_interrupt_capability(adapter); 2910 iavf_free_q_vectors(adapter); 2911 iavf_free_queues(adapter); 2912 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); 2913 iavf_shutdown_adminq(&adapter->hw); 2914 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 2915 iavf_change_state(adapter, __IAVF_DOWN); 2916 wake_up(&adapter->down_waitqueue); 2917 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 2918 } 2919 2920 /** 2921 * iavf_reset_task - Call-back task to handle hardware reset 2922 * @work: pointer to work_struct 2923 * 2924 * During reset we need to shut down and reinitialize the admin queue 2925 * before we can use it to communicate with the PF again. We also clear 2926 * and reinit the rings because that context is lost as well. 2927 **/ 2928 static void iavf_reset_task(struct work_struct *work) 2929 { 2930 struct iavf_adapter *adapter = container_of(work, 2931 struct iavf_adapter, 2932 reset_task); 2933 struct virtchnl_vf_resource *vfres = adapter->vf_res; 2934 struct net_device *netdev = adapter->netdev; 2935 struct iavf_hw *hw = &adapter->hw; 2936 struct iavf_mac_filter *f, *ftmp; 2937 struct iavf_cloud_filter *cf; 2938 enum iavf_status status; 2939 u32 reg_val; 2940 int i = 0, err; 2941 bool running; 2942 2943 /* Detach interface to avoid subsequent NDO callbacks */ 2944 rtnl_lock(); 2945 netif_device_detach(netdev); 2946 rtnl_unlock(); 2947 2948 /* When device is being removed it doesn't make sense to run the reset 2949 * task, just return in such a case. 
2950 */ 2951 if (!mutex_trylock(&adapter->crit_lock)) { 2952 if (adapter->state != __IAVF_REMOVE) 2953 queue_work(adapter->wq, &adapter->reset_task); 2954 2955 goto reset_finish; 2956 } 2957 2958 while (!mutex_trylock(&adapter->client_lock)) 2959 usleep_range(500, 1000); 2960 if (CLIENT_ENABLED(adapter)) { 2961 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | 2962 IAVF_FLAG_CLIENT_NEEDS_CLOSE | 2963 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | 2964 IAVF_FLAG_SERVICE_CLIENT_REQUESTED); 2965 cancel_delayed_work_sync(&adapter->client_task); 2966 iavf_notify_client_close(&adapter->vsi, true); 2967 } 2968 iavf_misc_irq_disable(adapter); 2969 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { 2970 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; 2971 /* Restart the AQ here. If we have been reset but didn't 2972 * detect it, or if the PF had to reinit, our AQ will be hosed. 2973 */ 2974 iavf_shutdown_adminq(hw); 2975 iavf_init_adminq(hw); 2976 iavf_request_reset(adapter); 2977 } 2978 adapter->flags |= IAVF_FLAG_RESET_PENDING; 2979 2980 /* poll until we see the reset actually happen */ 2981 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) { 2982 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & 2983 IAVF_VF_ARQLEN1_ARQENABLE_MASK; 2984 if (!reg_val) 2985 break; 2986 usleep_range(5000, 10000); 2987 } 2988 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) { 2989 dev_info(&adapter->pdev->dev, "Never saw reset\n"); 2990 goto continue_reset; /* act like the reset happened */ 2991 } 2992 2993 /* wait until the reset is complete and the PF is responding to us */ 2994 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 2995 /* sleep first to make sure a minimum wait time is met */ 2996 msleep(IAVF_RESET_WAIT_MS); 2997 2998 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & 2999 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 3000 if (reg_val == VIRTCHNL_VFR_VFACTIVE) 3001 break; 3002 } 3003 3004 pci_set_master(adapter->pdev); 3005 pci_restore_msi_state(adapter->pdev); 3006 3007 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { 3008 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 3009 reg_val); 3010 iavf_disable_vf(adapter); 3011 mutex_unlock(&adapter->client_lock); 3012 mutex_unlock(&adapter->crit_lock); 3013 if (netif_running(netdev)) { 3014 rtnl_lock(); 3015 dev_close(netdev); 3016 rtnl_unlock(); 3017 } 3018 return; /* Do not attempt to reinit. It's dead, Jim. */ 3019 } 3020 3021 continue_reset: 3022 /* We don't use netif_running() because it may be true prior to 3023 * ndo_open() returning, so we can't assume it means all our open 3024 * tasks have finished, since we're not holding the rtnl_lock here. 
3025 */
3026 running = adapter->state == __IAVF_RUNNING;
3027
3028 if (running) {
3029 netif_carrier_off(netdev);
3030 netif_tx_stop_all_queues(netdev);
3031 adapter->link_up = false;
3032 iavf_napi_disable_all(adapter);
3033 }
3034 iavf_irq_disable(adapter);
3035
3036 iavf_change_state(adapter, __IAVF_RESETTING);
3037 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3038
3039 /* free the Tx/Rx rings and descriptors, might be better to just
3040 * re-use them sometime in the future
3041 */
3042 iavf_free_all_rx_resources(adapter);
3043 iavf_free_all_tx_resources(adapter);
3044
3045 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3046 /* kill and reinit the admin queue */
3047 iavf_shutdown_adminq(hw);
3048 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3049 status = iavf_init_adminq(hw);
3050 if (status) {
3051 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3052 status);
3053 goto reset_err;
3054 }
3055 adapter->aq_required = 0;
3056
3057 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3058 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3059 err = iavf_reinit_interrupt_scheme(adapter);
3060 if (err)
3061 goto reset_err;
3062 }
3063
3064 if (RSS_AQ(adapter)) {
3065 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3066 } else {
3067 err = iavf_init_rss(adapter);
3068 if (err)
3069 goto reset_err;
3070 }
3071
3072 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3073 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3074 * sent/received yet, so VLAN_V2_ALLOWED() cannot be relied on here;
3075 * however, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3076 * VIRTCHNL_OP_GET_VF_RESOURCES has completed and
3077 * VIRTCHNL_VF_OFFLOAD_VLAN_V2 has been successfully negotiated
3078 */
3079 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3080 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3081
3082 spin_lock_bh(&adapter->mac_vlan_list_lock);
3083
3084 /* Delete the filter for the current MAC address; it could have
3085 * been changed by the PF via administratively set MAC.
3086 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3087 */
3088 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3089 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3090 list_del(&f->list);
3091 kfree(f);
3092 }
3093 }
3094 /* re-add all MAC filters */
3095 list_for_each_entry(f, &adapter->mac_filter_list, list) {
3096 f->add = true;
3097 }
3098 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3099
3100 /* check if TCs are running and re-add all cloud filters */
3101 spin_lock_bh(&adapter->cloud_filter_list_lock);
3102 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3103 adapter->num_tc) {
3104 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3105 cf->add = true;
3106 }
3107 }
3108 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3109
3110 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3111 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3112 iavf_misc_irq_enable(adapter);
3113
3114 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3115
3116 /* If we were running when the reset started, we need to restore some
3117 * state here.
3118 */ 3119 if (running) { 3120 /* allocate transmit descriptors */ 3121 err = iavf_setup_all_tx_resources(adapter); 3122 if (err) 3123 goto reset_err; 3124 3125 /* allocate receive descriptors */ 3126 err = iavf_setup_all_rx_resources(adapter); 3127 if (err) 3128 goto reset_err; 3129 3130 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) || 3131 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) { 3132 err = iavf_request_traffic_irqs(adapter, netdev->name); 3133 if (err) 3134 goto reset_err; 3135 3136 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED; 3137 } 3138 3139 iavf_configure(adapter); 3140 3141 /* iavf_up_complete() will switch device back 3142 * to __IAVF_RUNNING 3143 */ 3144 iavf_up_complete(adapter); 3145 3146 iavf_irq_enable(adapter, true); 3147 } else { 3148 iavf_change_state(adapter, __IAVF_DOWN); 3149 wake_up(&adapter->down_waitqueue); 3150 } 3151 3152 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 3153 3154 mutex_unlock(&adapter->client_lock); 3155 mutex_unlock(&adapter->crit_lock); 3156 3157 goto reset_finish; 3158 reset_err: 3159 if (running) { 3160 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); 3161 iavf_free_traffic_irqs(adapter); 3162 } 3163 iavf_disable_vf(adapter); 3164 3165 mutex_unlock(&adapter->client_lock); 3166 mutex_unlock(&adapter->crit_lock); 3167 3168 if (netif_running(netdev)) { 3169 /* Close device to ensure that Tx queues will not be started 3170 * during netif_device_attach() at the end of the reset task. 3171 */ 3172 rtnl_lock(); 3173 dev_close(netdev); 3174 rtnl_unlock(); 3175 } 3176 3177 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 3178 reset_finish: 3179 rtnl_lock(); 3180 netif_device_attach(netdev); 3181 rtnl_unlock(); 3182 } 3183 3184 /** 3185 * iavf_adminq_task - worker thread to clean the admin queue 3186 * @work: pointer to work_struct containing our data 3187 **/ 3188 static void iavf_adminq_task(struct work_struct *work) 3189 { 3190 struct iavf_adapter *adapter = 3191 container_of(work, struct iavf_adapter, adminq_task); 3192 struct iavf_hw *hw = &adapter->hw; 3193 struct iavf_arq_event_info event; 3194 enum virtchnl_ops v_op; 3195 enum iavf_status ret, v_ret; 3196 u32 val, oldval; 3197 u16 pending; 3198 3199 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 3200 goto out; 3201 3202 if (!mutex_trylock(&adapter->crit_lock)) { 3203 if (adapter->state == __IAVF_REMOVE) 3204 return; 3205 3206 queue_work(adapter->wq, &adapter->adminq_task); 3207 goto out; 3208 } 3209 3210 event.buf_len = IAVF_MAX_AQ_BUF_SIZE; 3211 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 3212 if (!event.msg_buf) 3213 goto out; 3214 3215 do { 3216 ret = iavf_clean_arq_element(hw, &event, &pending); 3217 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); 3218 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); 3219 3220 if (ret || !v_op) 3221 break; /* No event to process or error cleaning ARQ */ 3222 3223 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, 3224 event.msg_len); 3225 if (pending != 0) 3226 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); 3227 } while (pending); 3228 mutex_unlock(&adapter->crit_lock); 3229 3230 if ((adapter->flags & 3231 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 3232 adapter->state == __IAVF_RESETTING) 3233 goto freedom; 3234 3235 /* check for error indications */ 3236 val = rd32(hw, hw->aq.arq.len); 3237 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ 3238 goto freedom; 3239 oldval = val; 3240 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { 3241 
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); 3242 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK; 3243 } 3244 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) { 3245 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); 3246 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK; 3247 } 3248 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) { 3249 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); 3250 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK; 3251 } 3252 if (oldval != val) 3253 wr32(hw, hw->aq.arq.len, val); 3254 3255 val = rd32(hw, hw->aq.asq.len); 3256 oldval = val; 3257 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) { 3258 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); 3259 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK; 3260 } 3261 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) { 3262 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); 3263 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK; 3264 } 3265 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) { 3266 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); 3267 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK; 3268 } 3269 if (oldval != val) 3270 wr32(hw, hw->aq.asq.len, val); 3271 3272 freedom: 3273 kfree(event.msg_buf); 3274 out: 3275 /* re-enable Admin queue interrupt cause */ 3276 iavf_misc_irq_enable(adapter); 3277 } 3278 3279 /** 3280 * iavf_client_task - worker thread to perform client work 3281 * @work: pointer to work_struct containing our data 3282 * 3283 * This task handles client interactions. Because client calls can be 3284 * reentrant, we can't handle them in the watchdog. 3285 **/ 3286 static void iavf_client_task(struct work_struct *work) 3287 { 3288 struct iavf_adapter *adapter = 3289 container_of(work, struct iavf_adapter, client_task.work); 3290 3291 /* If we can't get the client bit, just give up. We'll be rescheduled 3292 * later. 3293 */ 3294 3295 if (!mutex_trylock(&adapter->client_lock)) 3296 return; 3297 3298 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { 3299 iavf_client_subtask(adapter); 3300 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 3301 goto out; 3302 } 3303 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { 3304 iavf_notify_client_l2_params(&adapter->vsi); 3305 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; 3306 goto out; 3307 } 3308 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { 3309 iavf_notify_client_close(&adapter->vsi, false); 3310 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; 3311 goto out; 3312 } 3313 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { 3314 iavf_notify_client_open(&adapter->vsi); 3315 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; 3316 } 3317 out: 3318 mutex_unlock(&adapter->client_lock); 3319 } 3320 3321 /** 3322 * iavf_free_all_tx_resources - Free Tx Resources for All Queues 3323 * @adapter: board private structure 3324 * 3325 * Free all transmit software resources 3326 **/ 3327 void iavf_free_all_tx_resources(struct iavf_adapter *adapter) 3328 { 3329 int i; 3330 3331 if (!adapter->tx_rings) 3332 return; 3333 3334 for (i = 0; i < adapter->num_active_queues; i++) 3335 if (adapter->tx_rings[i].desc) 3336 iavf_free_tx_resources(&adapter->tx_rings[i]); 3337 } 3338 3339 /** 3340 * iavf_setup_all_tx_resources - allocate all queues Tx resources 3341 * @adapter: board private structure 3342 * 3343 * If this function returns with an error, then it's possible one or 3344 * more of the rings is populated (while the rest are not). It is the 3345 * callers duty to clean those orphaned rings. 
3346 * 3347 * Return 0 on success, negative on failure 3348 **/ 3349 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter) 3350 { 3351 int i, err = 0; 3352 3353 for (i = 0; i < adapter->num_active_queues; i++) { 3354 adapter->tx_rings[i].count = adapter->tx_desc_count; 3355 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]); 3356 if (!err) 3357 continue; 3358 dev_err(&adapter->pdev->dev, 3359 "Allocation for Tx Queue %u failed\n", i); 3360 break; 3361 } 3362 3363 return err; 3364 } 3365 3366 /** 3367 * iavf_setup_all_rx_resources - allocate all queues Rx resources 3368 * @adapter: board private structure 3369 * 3370 * If this function returns with an error, then it's possible one or 3371 * more of the rings is populated (while the rest are not). It is the 3372 * callers duty to clean those orphaned rings. 3373 * 3374 * Return 0 on success, negative on failure 3375 **/ 3376 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter) 3377 { 3378 int i, err = 0; 3379 3380 for (i = 0; i < adapter->num_active_queues; i++) { 3381 adapter->rx_rings[i].count = adapter->rx_desc_count; 3382 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]); 3383 if (!err) 3384 continue; 3385 dev_err(&adapter->pdev->dev, 3386 "Allocation for Rx Queue %u failed\n", i); 3387 break; 3388 } 3389 return err; 3390 } 3391 3392 /** 3393 * iavf_free_all_rx_resources - Free Rx Resources for All Queues 3394 * @adapter: board private structure 3395 * 3396 * Free all receive software resources 3397 **/ 3398 void iavf_free_all_rx_resources(struct iavf_adapter *adapter) 3399 { 3400 int i; 3401 3402 if (!adapter->rx_rings) 3403 return; 3404 3405 for (i = 0; i < adapter->num_active_queues; i++) 3406 if (adapter->rx_rings[i].desc) 3407 iavf_free_rx_resources(&adapter->rx_rings[i]); 3408 } 3409 3410 /** 3411 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth 3412 * @adapter: board private structure 3413 * @max_tx_rate: max Tx bw for a tc 3414 **/ 3415 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, 3416 u64 max_tx_rate) 3417 { 3418 int speed = 0, ret = 0; 3419 3420 if (ADV_LINK_SUPPORT(adapter)) { 3421 if (adapter->link_speed_mbps < U32_MAX) { 3422 speed = adapter->link_speed_mbps; 3423 goto validate_bw; 3424 } else { 3425 dev_err(&adapter->pdev->dev, "Unknown link speed\n"); 3426 return -EINVAL; 3427 } 3428 } 3429 3430 switch (adapter->link_speed) { 3431 case VIRTCHNL_LINK_SPEED_40GB: 3432 speed = SPEED_40000; 3433 break; 3434 case VIRTCHNL_LINK_SPEED_25GB: 3435 speed = SPEED_25000; 3436 break; 3437 case VIRTCHNL_LINK_SPEED_20GB: 3438 speed = SPEED_20000; 3439 break; 3440 case VIRTCHNL_LINK_SPEED_10GB: 3441 speed = SPEED_10000; 3442 break; 3443 case VIRTCHNL_LINK_SPEED_5GB: 3444 speed = SPEED_5000; 3445 break; 3446 case VIRTCHNL_LINK_SPEED_2_5GB: 3447 speed = SPEED_2500; 3448 break; 3449 case VIRTCHNL_LINK_SPEED_1GB: 3450 speed = SPEED_1000; 3451 break; 3452 case VIRTCHNL_LINK_SPEED_100MB: 3453 speed = SPEED_100; 3454 break; 3455 default: 3456 break; 3457 } 3458 3459 validate_bw: 3460 if (max_tx_rate > speed) { 3461 dev_err(&adapter->pdev->dev, 3462 "Invalid tx rate specified\n"); 3463 ret = -EINVAL; 3464 } 3465 3466 return ret; 3467 } 3468 3469 /** 3470 * iavf_validate_ch_config - validate queue mapping info 3471 * @adapter: board private structure 3472 * @mqprio_qopt: queue parameters 3473 * 3474 * This function validates if the config provided by the user to 3475 * configure queue channels is valid or not. Returns 0 on a valid 3476 * config. 
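 * The per-TC max_rate values supplied by the stack are converted to Mbps
 * (IAVF_MBPS_DIVISOR) and, when non-zero, must be at least IAVF_MBPS_QUANTA
 * and an exact multiple of it; non-zero min_rate values are rejected.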
3477 **/ 3478 static int iavf_validate_ch_config(struct iavf_adapter *adapter, 3479 struct tc_mqprio_qopt_offload *mqprio_qopt) 3480 { 3481 u64 total_max_rate = 0; 3482 u32 tx_rate_rem = 0; 3483 int i, num_qps = 0; 3484 u64 tx_rate = 0; 3485 int ret = 0; 3486 3487 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || 3488 mqprio_qopt->qopt.num_tc < 1) 3489 return -EINVAL; 3490 3491 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 3492 if (!mqprio_qopt->qopt.count[i] || 3493 mqprio_qopt->qopt.offset[i] != num_qps) 3494 return -EINVAL; 3495 if (mqprio_qopt->min_rate[i]) { 3496 dev_err(&adapter->pdev->dev, 3497 "Invalid min tx rate (greater than 0) specified for TC%d\n", 3498 i); 3499 return -EINVAL; 3500 } 3501 3502 /* convert to Mbps */ 3503 tx_rate = div_u64(mqprio_qopt->max_rate[i], 3504 IAVF_MBPS_DIVISOR); 3505 3506 if (mqprio_qopt->max_rate[i] && 3507 tx_rate < IAVF_MBPS_QUANTA) { 3508 dev_err(&adapter->pdev->dev, 3509 "Invalid max tx rate for TC%d, minimum %dMbps\n", 3510 i, IAVF_MBPS_QUANTA); 3511 return -EINVAL; 3512 } 3513 3514 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); 3515 3516 if (tx_rate_rem != 0) { 3517 dev_err(&adapter->pdev->dev, 3518 "Invalid max tx rate for TC%d, not divisible by %d\n", 3519 i, IAVF_MBPS_QUANTA); 3520 return -EINVAL; 3521 } 3522 3523 total_max_rate += tx_rate; 3524 num_qps += mqprio_qopt->qopt.count[i]; 3525 } 3526 if (num_qps > adapter->num_active_queues) { 3527 dev_err(&adapter->pdev->dev, 3528 "Cannot support requested number of queues\n"); 3529 return -EINVAL; 3530 } 3531 3532 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); 3533 return ret; 3534 } 3535 3536 /** 3537 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes 3538 * @adapter: board private structure 3539 **/ 3540 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter) 3541 { 3542 struct iavf_cloud_filter *cf, *cftmp; 3543 3544 spin_lock_bh(&adapter->cloud_filter_list_lock); 3545 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 3546 list) { 3547 list_del(&cf->list); 3548 kfree(cf); 3549 adapter->num_cloud_filters--; 3550 } 3551 spin_unlock_bh(&adapter->cloud_filter_list_lock); 3552 } 3553 3554 /** 3555 * __iavf_setup_tc - configure multiple traffic classes 3556 * @netdev: network interface device structure 3557 * @type_data: tc offload data 3558 * 3559 * This function processes the config information provided by the 3560 * user to configure traffic classes/queue channels and packages the 3561 * information to request the PF to setup traffic classes. 3562 * 3563 * Returns 0 on success. 
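 * As a usage sketch (assumed iproute2 syntax, not part of this driver), ADq
 * channels are typically requested with the mqprio qdisc in channel mode:
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *       queues 4@0 4@4 hw 1 mode channel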
3564 **/ 3565 static int __iavf_setup_tc(struct net_device *netdev, void *type_data) 3566 { 3567 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 3568 struct iavf_adapter *adapter = netdev_priv(netdev); 3569 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3570 u8 num_tc = 0, total_qps = 0; 3571 int ret = 0, netdev_tc = 0; 3572 u64 max_tx_rate; 3573 u16 mode; 3574 int i; 3575 3576 num_tc = mqprio_qopt->qopt.num_tc; 3577 mode = mqprio_qopt->mode; 3578 3579 /* delete queue_channel */ 3580 if (!mqprio_qopt->qopt.hw) { 3581 if (adapter->ch_config.state == __IAVF_TC_RUNNING) { 3582 /* reset the tc configuration */ 3583 netdev_reset_tc(netdev); 3584 adapter->num_tc = 0; 3585 netif_tx_stop_all_queues(netdev); 3586 netif_tx_disable(netdev); 3587 iavf_del_all_cloud_filters(adapter); 3588 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS; 3589 total_qps = adapter->orig_num_active_queues; 3590 goto exit; 3591 } else { 3592 return -EINVAL; 3593 } 3594 } 3595 3596 /* add queue channel */ 3597 if (mode == TC_MQPRIO_MODE_CHANNEL) { 3598 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3599 dev_err(&adapter->pdev->dev, "ADq not supported\n"); 3600 return -EOPNOTSUPP; 3601 } 3602 if (adapter->ch_config.state != __IAVF_TC_INVALID) { 3603 dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 3604 return -EINVAL; 3605 } 3606 3607 ret = iavf_validate_ch_config(adapter, mqprio_qopt); 3608 if (ret) 3609 return ret; 3610 /* Return if same TC config is requested */ 3611 if (adapter->num_tc == num_tc) 3612 return 0; 3613 adapter->num_tc = num_tc; 3614 3615 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 3616 if (i < num_tc) { 3617 adapter->ch_config.ch_info[i].count = 3618 mqprio_qopt->qopt.count[i]; 3619 adapter->ch_config.ch_info[i].offset = 3620 mqprio_qopt->qopt.offset[i]; 3621 total_qps += mqprio_qopt->qopt.count[i]; 3622 max_tx_rate = mqprio_qopt->max_rate[i]; 3623 /* convert to Mbps */ 3624 max_tx_rate = div_u64(max_tx_rate, 3625 IAVF_MBPS_DIVISOR); 3626 adapter->ch_config.ch_info[i].max_tx_rate = 3627 max_tx_rate; 3628 } else { 3629 adapter->ch_config.ch_info[i].count = 1; 3630 adapter->ch_config.ch_info[i].offset = 0; 3631 } 3632 } 3633 3634 /* Take snapshot of original config such as "num_active_queues" 3635 * It is used later when delete ADQ flow is exercised, so that 3636 * once delete ADQ flow completes, VF shall go back to its 3637 * original queue configuration 3638 */ 3639 3640 adapter->orig_num_active_queues = adapter->num_active_queues; 3641 3642 /* Store queue info based on TC so that VF gets configured 3643 * with correct number of queues when VF completes ADQ config 3644 * flow 3645 */ 3646 adapter->ch_config.total_qps = total_qps; 3647 3648 netif_tx_stop_all_queues(netdev); 3649 netif_tx_disable(netdev); 3650 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS; 3651 netdev_reset_tc(netdev); 3652 /* Report the tc mapping up the stack */ 3653 netdev_set_num_tc(adapter->netdev, num_tc); 3654 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) { 3655 u16 qcount = mqprio_qopt->qopt.count[i]; 3656 u16 qoffset = mqprio_qopt->qopt.offset[i]; 3657 3658 if (i < num_tc) 3659 netdev_set_tc_queue(netdev, netdev_tc++, qcount, 3660 qoffset); 3661 } 3662 } 3663 exit: 3664 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) 3665 return 0; 3666 3667 netif_set_real_num_rx_queues(netdev, total_qps); 3668 netif_set_real_num_tx_queues(netdev, total_qps); 3669 3670 return ret; 3671 } 3672 3673 /** 3674 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel 3675 * 
@adapter: board private structure 3676 * @f: pointer to struct flow_cls_offload 3677 * @filter: pointer to cloud filter structure 3678 */ 3679 static int iavf_parse_cls_flower(struct iavf_adapter *adapter, 3680 struct flow_cls_offload *f, 3681 struct iavf_cloud_filter *filter) 3682 { 3683 struct flow_rule *rule = flow_cls_offload_flow_rule(f); 3684 struct flow_dissector *dissector = rule->match.dissector; 3685 u16 n_proto_mask = 0; 3686 u16 n_proto_key = 0; 3687 u8 field_flags = 0; 3688 u16 addr_type = 0; 3689 u16 n_proto = 0; 3690 int i = 0; 3691 struct virtchnl_filter *vf = &filter->f; 3692 3693 if (dissector->used_keys & 3694 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 3695 BIT(FLOW_DISSECTOR_KEY_BASIC) | 3696 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 3697 BIT(FLOW_DISSECTOR_KEY_VLAN) | 3698 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 3699 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 3700 BIT(FLOW_DISSECTOR_KEY_PORTS) | 3701 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 3702 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 3703 dissector->used_keys); 3704 return -EOPNOTSUPP; 3705 } 3706 3707 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 3708 struct flow_match_enc_keyid match; 3709 3710 flow_rule_match_enc_keyid(rule, &match); 3711 if (match.mask->keyid != 0) 3712 field_flags |= IAVF_CLOUD_FIELD_TEN_ID; 3713 } 3714 3715 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 3716 struct flow_match_basic match; 3717 3718 flow_rule_match_basic(rule, &match); 3719 n_proto_key = ntohs(match.key->n_proto); 3720 n_proto_mask = ntohs(match.mask->n_proto); 3721 3722 if (n_proto_key == ETH_P_ALL) { 3723 n_proto_key = 0; 3724 n_proto_mask = 0; 3725 } 3726 n_proto = n_proto_key & n_proto_mask; 3727 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 3728 return -EINVAL; 3729 if (n_proto == ETH_P_IPV6) { 3730 /* specify flow type as TCP IPv6 */ 3731 vf->flow_type = VIRTCHNL_TCP_V6_FLOW; 3732 } 3733 3734 if (match.key->ip_proto != IPPROTO_TCP) { 3735 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 3736 return -EINVAL; 3737 } 3738 } 3739 3740 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 3741 struct flow_match_eth_addrs match; 3742 3743 flow_rule_match_eth_addrs(rule, &match); 3744 3745 /* use is_broadcast and is_zero to check for all 0xf or 0 */ 3746 if (!is_zero_ether_addr(match.mask->dst)) { 3747 if (is_broadcast_ether_addr(match.mask->dst)) { 3748 field_flags |= IAVF_CLOUD_FIELD_OMAC; 3749 } else { 3750 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 3751 match.mask->dst); 3752 return -EINVAL; 3753 } 3754 } 3755 3756 if (!is_zero_ether_addr(match.mask->src)) { 3757 if (is_broadcast_ether_addr(match.mask->src)) { 3758 field_flags |= IAVF_CLOUD_FIELD_IMAC; 3759 } else { 3760 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 3761 match.mask->src); 3762 return -EINVAL; 3763 } 3764 } 3765 3766 if (!is_zero_ether_addr(match.key->dst)) 3767 if (is_valid_ether_addr(match.key->dst) || 3768 is_multicast_ether_addr(match.key->dst)) { 3769 /* set the mask if a valid dst_mac address */ 3770 for (i = 0; i < ETH_ALEN; i++) 3771 vf->mask.tcp_spec.dst_mac[i] |= 0xff; 3772 ether_addr_copy(vf->data.tcp_spec.dst_mac, 3773 match.key->dst); 3774 } 3775 3776 if (!is_zero_ether_addr(match.key->src)) 3777 if (is_valid_ether_addr(match.key->src) || 3778 is_multicast_ether_addr(match.key->src)) { 3779 /* set the mask if a valid dst_mac address */ 3780 for (i = 0; i < ETH_ALEN; i++) 3781 vf->mask.tcp_spec.src_mac[i] |= 0xff; 3782 ether_addr_copy(vf->data.tcp_spec.src_mac, 3783 
match.key->src); 3784 } 3785 } 3786 3787 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 3788 struct flow_match_vlan match; 3789 3790 flow_rule_match_vlan(rule, &match); 3791 if (match.mask->vlan_id) { 3792 if (match.mask->vlan_id == VLAN_VID_MASK) { 3793 field_flags |= IAVF_CLOUD_FIELD_IVLAN; 3794 } else { 3795 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 3796 match.mask->vlan_id); 3797 return -EINVAL; 3798 } 3799 } 3800 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 3801 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); 3802 } 3803 3804 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 3805 struct flow_match_control match; 3806 3807 flow_rule_match_control(rule, &match); 3808 addr_type = match.key->addr_type; 3809 } 3810 3811 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 3812 struct flow_match_ipv4_addrs match; 3813 3814 flow_rule_match_ipv4_addrs(rule, &match); 3815 if (match.mask->dst) { 3816 if (match.mask->dst == cpu_to_be32(0xffffffff)) { 3817 field_flags |= IAVF_CLOUD_FIELD_IIP; 3818 } else { 3819 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 3820 be32_to_cpu(match.mask->dst)); 3821 return -EINVAL; 3822 } 3823 } 3824 3825 if (match.mask->src) { 3826 if (match.mask->src == cpu_to_be32(0xffffffff)) { 3827 field_flags |= IAVF_CLOUD_FIELD_IIP; 3828 } else { 3829 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 3830 be32_to_cpu(match.mask->src)); 3831 return -EINVAL; 3832 } 3833 } 3834 3835 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { 3836 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 3837 return -EINVAL; 3838 } 3839 if (match.key->dst) { 3840 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); 3841 vf->data.tcp_spec.dst_ip[0] = match.key->dst; 3842 } 3843 if (match.key->src) { 3844 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); 3845 vf->data.tcp_spec.src_ip[0] = match.key->src; 3846 } 3847 } 3848 3849 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 3850 struct flow_match_ipv6_addrs match; 3851 3852 flow_rule_match_ipv6_addrs(rule, &match); 3853 3854 /* validate mask, make sure it is not IPV6_ADDR_ANY */ 3855 if (ipv6_addr_any(&match.mask->dst)) { 3856 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 3857 IPV6_ADDR_ANY); 3858 return -EINVAL; 3859 } 3860 3861 /* src and dest IPv6 address should not be LOOPBACK 3862 * (0:0:0:0:0:0:0:1) which can be represented as ::1 3863 */ 3864 if (ipv6_addr_loopback(&match.key->dst) || 3865 ipv6_addr_loopback(&match.key->src)) { 3866 dev_err(&adapter->pdev->dev, 3867 "ipv6 addr should not be loopback\n"); 3868 return -EINVAL; 3869 } 3870 if (!ipv6_addr_any(&match.mask->dst) || 3871 !ipv6_addr_any(&match.mask->src)) 3872 field_flags |= IAVF_CLOUD_FIELD_IIP; 3873 3874 for (i = 0; i < 4; i++) 3875 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); 3876 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, 3877 sizeof(vf->data.tcp_spec.dst_ip)); 3878 for (i = 0; i < 4; i++) 3879 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); 3880 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, 3881 sizeof(vf->data.tcp_spec.src_ip)); 3882 } 3883 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 3884 struct flow_match_ports match; 3885 3886 flow_rule_match_ports(rule, &match); 3887 if (match.mask->src) { 3888 if (match.mask->src == cpu_to_be16(0xffff)) { 3889 field_flags |= IAVF_CLOUD_FIELD_IIP; 3890 } else { 3891 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", 3892 be16_to_cpu(match.mask->src)); 3893 return 
-EINVAL; 3894 } 3895 } 3896 3897 if (match.mask->dst) { 3898 if (match.mask->dst == cpu_to_be16(0xffff)) { 3899 field_flags |= IAVF_CLOUD_FIELD_IIP; 3900 } else { 3901 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 3902 be16_to_cpu(match.mask->dst)); 3903 return -EINVAL; 3904 } 3905 } 3906 if (match.key->dst) { 3907 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 3908 vf->data.tcp_spec.dst_port = match.key->dst; 3909 } 3910 3911 if (match.key->src) { 3912 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 3913 vf->data.tcp_spec.src_port = match.key->src; 3914 } 3915 } 3916 vf->field_flags = field_flags; 3917 3918 return 0; 3919 } 3920 3921 /** 3922 * iavf_handle_tclass - Forward to a traffic class on the device 3923 * @adapter: board private structure 3924 * @tc: traffic class index on the device 3925 * @filter: pointer to cloud filter structure 3926 */ 3927 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc, 3928 struct iavf_cloud_filter *filter) 3929 { 3930 if (tc == 0) 3931 return 0; 3932 if (tc < adapter->num_tc) { 3933 if (!filter->f.data.tcp_spec.dst_port) { 3934 dev_err(&adapter->pdev->dev, 3935 "Specify destination port to redirect to traffic class other than TC0\n"); 3936 return -EINVAL; 3937 } 3938 } 3939 /* redirect to a traffic class on the same device */ 3940 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 3941 filter->f.action_meta = tc; 3942 return 0; 3943 } 3944 3945 /** 3946 * iavf_find_cf - Find the cloud filter in the list 3947 * @adapter: Board private structure 3948 * @cookie: filter specific cookie 3949 * 3950 * Returns ptr to the filter object or NULL. Must be called while holding the 3951 * cloud_filter_list_lock. 3952 */ 3953 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter, 3954 unsigned long *cookie) 3955 { 3956 struct iavf_cloud_filter *filter = NULL; 3957 3958 if (!cookie) 3959 return NULL; 3960 3961 list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 3962 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 3963 return filter; 3964 } 3965 return NULL; 3966 } 3967 3968 /** 3969 * iavf_configure_clsflower - Add tc flower filters 3970 * @adapter: board private structure 3971 * @cls_flower: Pointer to struct flow_cls_offload 3972 */ 3973 static int iavf_configure_clsflower(struct iavf_adapter *adapter, 3974 struct flow_cls_offload *cls_flower) 3975 { 3976 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 3977 struct iavf_cloud_filter *filter = NULL; 3978 int err = -EINVAL, count = 50; 3979 3980 if (tc < 0) { 3981 dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 3982 return -EINVAL; 3983 } 3984 3985 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 3986 if (!filter) 3987 return -ENOMEM; 3988 3989 while (!mutex_trylock(&adapter->crit_lock)) { 3990 if (--count == 0) { 3991 kfree(filter); 3992 return err; 3993 } 3994 udelay(1); 3995 } 3996 3997 filter->cookie = cls_flower->cookie; 3998 3999 /* bail out here if filter already exists */ 4000 spin_lock_bh(&adapter->cloud_filter_list_lock); 4001 if (iavf_find_cf(adapter, &cls_flower->cookie)) { 4002 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n"); 4003 err = -EEXIST; 4004 goto spin_unlock; 4005 } 4006 spin_unlock_bh(&adapter->cloud_filter_list_lock); 4007 4008 /* set the mask to all zeroes to begin with */ 4009 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 4010 /* start out with flow type and eth type IPv4 to begin with */ 4011 filter->f.flow_type = 
VIRTCHNL_TCP_V4_FLOW; 4012 err = iavf_parse_cls_flower(adapter, cls_flower, filter); 4013 if (err) 4014 goto err; 4015 4016 err = iavf_handle_tclass(adapter, tc, filter); 4017 if (err) 4018 goto err; 4019 4020 /* add filter to the list */ 4021 spin_lock_bh(&adapter->cloud_filter_list_lock); 4022 list_add_tail(&filter->list, &adapter->cloud_filter_list); 4023 adapter->num_cloud_filters++; 4024 filter->add = true; 4025 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; 4026 spin_unlock: 4027 spin_unlock_bh(&adapter->cloud_filter_list_lock); 4028 err: 4029 if (err) 4030 kfree(filter); 4031 4032 mutex_unlock(&adapter->crit_lock); 4033 return err; 4034 } 4035 4036 /** 4037 * iavf_delete_clsflower - Remove tc flower filters 4038 * @adapter: board private structure 4039 * @cls_flower: Pointer to struct flow_cls_offload 4040 */ 4041 static int iavf_delete_clsflower(struct iavf_adapter *adapter, 4042 struct flow_cls_offload *cls_flower) 4043 { 4044 struct iavf_cloud_filter *filter = NULL; 4045 int err = 0; 4046 4047 spin_lock_bh(&adapter->cloud_filter_list_lock); 4048 filter = iavf_find_cf(adapter, &cls_flower->cookie); 4049 if (filter) { 4050 filter->del = true; 4051 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; 4052 } else { 4053 err = -EINVAL; 4054 } 4055 spin_unlock_bh(&adapter->cloud_filter_list_lock); 4056 4057 return err; 4058 } 4059 4060 /** 4061 * iavf_setup_tc_cls_flower - flower classifier offloads 4062 * @adapter: board private structure 4063 * @cls_flower: pointer to flow_cls_offload struct with flow info 4064 */ 4065 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, 4066 struct flow_cls_offload *cls_flower) 4067 { 4068 switch (cls_flower->command) { 4069 case FLOW_CLS_REPLACE: 4070 return iavf_configure_clsflower(adapter, cls_flower); 4071 case FLOW_CLS_DESTROY: 4072 return iavf_delete_clsflower(adapter, cls_flower); 4073 case FLOW_CLS_STATS: 4074 return -EOPNOTSUPP; 4075 default: 4076 return -EOPNOTSUPP; 4077 } 4078 } 4079 4080 /** 4081 * iavf_setup_tc_block_cb - block callback for tc 4082 * @type: type of offload 4083 * @type_data: offload data 4084 * @cb_priv: 4085 * 4086 * This function is the block callback for traffic classes 4087 **/ 4088 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 4089 void *cb_priv) 4090 { 4091 struct iavf_adapter *adapter = cb_priv; 4092 4093 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) 4094 return -EOPNOTSUPP; 4095 4096 switch (type) { 4097 case TC_SETUP_CLSFLOWER: 4098 return iavf_setup_tc_cls_flower(cb_priv, type_data); 4099 default: 4100 return -EOPNOTSUPP; 4101 } 4102 } 4103 4104 static LIST_HEAD(iavf_block_cb_list); 4105 4106 /** 4107 * iavf_setup_tc - configure multiple traffic classes 4108 * @netdev: network interface device structure 4109 * @type: type of offload 4110 * @type_data: tc offload data 4111 * 4112 * This function is the callback to ndo_setup_tc in the 4113 * netdev_ops. 
4114 * 4115 * Returns 0 on success 4116 **/ 4117 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 4118 void *type_data) 4119 { 4120 struct iavf_adapter *adapter = netdev_priv(netdev); 4121 4122 switch (type) { 4123 case TC_SETUP_QDISC_MQPRIO: 4124 return __iavf_setup_tc(netdev, type_data); 4125 case TC_SETUP_BLOCK: 4126 return flow_block_cb_setup_simple(type_data, 4127 &iavf_block_cb_list, 4128 iavf_setup_tc_block_cb, 4129 adapter, adapter, true); 4130 default: 4131 return -EOPNOTSUPP; 4132 } 4133 } 4134 4135 /** 4136 * iavf_open - Called when a network interface is made active 4137 * @netdev: network interface device structure 4138 * 4139 * Returns 0 on success, negative value on failure 4140 * 4141 * The open entry point is called when a network interface is made 4142 * active by the system (IFF_UP). At this point all resources needed 4143 * for transmit and receive operations are allocated, the interrupt 4144 * handler is registered with the OS, the watchdog is started, 4145 * and the stack is notified that the interface is ready. 4146 **/ 4147 static int iavf_open(struct net_device *netdev) 4148 { 4149 struct iavf_adapter *adapter = netdev_priv(netdev); 4150 int err; 4151 4152 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { 4153 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); 4154 return -EIO; 4155 } 4156 4157 while (!mutex_trylock(&adapter->crit_lock)) { 4158 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock 4159 * is already taken and iavf_open is called from an upper 4160 * device's notifier reacting on NETDEV_REGISTER event. 4161 * We have to leave here to avoid dead lock. 4162 */ 4163 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) 4164 return -EBUSY; 4165 4166 usleep_range(500, 1000); 4167 } 4168 4169 if (adapter->state != __IAVF_DOWN) { 4170 err = -EBUSY; 4171 goto err_unlock; 4172 } 4173 4174 if (adapter->state == __IAVF_RUNNING && 4175 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) { 4176 dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); 4177 err = 0; 4178 goto err_unlock; 4179 } 4180 4181 /* allocate transmit descriptors */ 4182 err = iavf_setup_all_tx_resources(adapter); 4183 if (err) 4184 goto err_setup_tx; 4185 4186 /* allocate receive descriptors */ 4187 err = iavf_setup_all_rx_resources(adapter); 4188 if (err) 4189 goto err_setup_rx; 4190 4191 /* clear any pending interrupts, may auto mask */ 4192 err = iavf_request_traffic_irqs(adapter, netdev->name); 4193 if (err) 4194 goto err_req_irq; 4195 4196 spin_lock_bh(&adapter->mac_vlan_list_lock); 4197 4198 iavf_add_filter(adapter, adapter->hw.mac.addr); 4199 4200 spin_unlock_bh(&adapter->mac_vlan_list_lock); 4201 4202 /* Restore VLAN filters that were removed with IFF_DOWN */ 4203 iavf_restore_filters(adapter); 4204 4205 iavf_configure(adapter); 4206 4207 iavf_up_complete(adapter); 4208 4209 iavf_irq_enable(adapter, true); 4210 4211 mutex_unlock(&adapter->crit_lock); 4212 4213 return 0; 4214 4215 err_req_irq: 4216 iavf_down(adapter); 4217 iavf_free_traffic_irqs(adapter); 4218 err_setup_rx: 4219 iavf_free_all_rx_resources(adapter); 4220 err_setup_tx: 4221 iavf_free_all_tx_resources(adapter); 4222 err_unlock: 4223 mutex_unlock(&adapter->crit_lock); 4224 4225 return err; 4226 } 4227 4228 /** 4229 * iavf_close - Disables a network interface 4230 * @netdev: network interface device structure 4231 * 4232 * Returns 0, this is not allowed to fail 4233 * 4234 * The close entry point is called when an interface is de-activated 4235 * by the OS. 
The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for the admin
 * queue) are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u64 aq_to_restore;
	int status;

	mutex_lock(&adapter->crit_lock);

	if (adapter->state <= __IAVF_DOWN_PENDING) {
		mutex_unlock(&adapter->crit_lock);
		return 0;
	}

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
	 * deadlock with adminq_task() until iavf_close() times out. We must
	 * send IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to
	 * make disabling queues possible for the VF. Give only the necessary
	 * flags to iavf_down() and save the others to be set right before
	 * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
	 * been sent and the iavf is in the DOWN state.
	 */
	aq_to_restore = adapter->aq_required;
	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;

	/* Remove flags which we do not want to send after close or which we
	 * want to send before disabling queues.
	 */
	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
			   IAVF_FLAG_AQ_ENABLE_QUEUES |
			   IAVF_FLAG_AQ_CONFIGURE_QUEUES |
			   IAVF_FLAG_AQ_ADD_VLAN_FILTER |
			   IAVF_FLAG_AQ_ADD_MAC_FILTER |
			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
			   IAVF_FLAG_AQ_ADD_FDIR_FILTER |
			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	iavf_down(adapter);
	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
	iavf_free_traffic_irqs(adapter);

	mutex_unlock(&adapter->crit_lock);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after the PF driver
	 * processes and responds to admin queue commands).
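	 *
	 * If the PF never responds, the wait below is bounded to 500 ms and
	 * we only log a warning; close still returns 0.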
4294 */ 4295 4296 status = wait_event_timeout(adapter->down_waitqueue, 4297 adapter->state == __IAVF_DOWN, 4298 msecs_to_jiffies(500)); 4299 if (!status) 4300 netdev_warn(netdev, "Device resources not yet released\n"); 4301 4302 mutex_lock(&adapter->crit_lock); 4303 adapter->aq_required |= aq_to_restore; 4304 mutex_unlock(&adapter->crit_lock); 4305 return 0; 4306 } 4307 4308 /** 4309 * iavf_change_mtu - Change the Maximum Transfer Unit 4310 * @netdev: network interface device structure 4311 * @new_mtu: new value for maximum frame size 4312 * 4313 * Returns 0 on success, negative on failure 4314 **/ 4315 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 4316 { 4317 struct iavf_adapter *adapter = netdev_priv(netdev); 4318 4319 netdev_dbg(netdev, "changing MTU from %d to %d\n", 4320 netdev->mtu, new_mtu); 4321 netdev->mtu = new_mtu; 4322 if (CLIENT_ENABLED(adapter)) { 4323 iavf_notify_client_l2_params(&adapter->vsi); 4324 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; 4325 } 4326 4327 if (netif_running(netdev)) { 4328 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 4329 queue_work(adapter->wq, &adapter->reset_task); 4330 } 4331 4332 return 0; 4333 } 4334 4335 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 4336 NETIF_F_HW_VLAN_CTAG_TX | \ 4337 NETIF_F_HW_VLAN_STAG_RX | \ 4338 NETIF_F_HW_VLAN_STAG_TX) 4339 4340 /** 4341 * iavf_set_features - set the netdev feature flags 4342 * @netdev: ptr to the netdev being adjusted 4343 * @features: the feature set that the stack is suggesting 4344 * Note: expects to be called while under rtnl_lock() 4345 **/ 4346 static int iavf_set_features(struct net_device *netdev, 4347 netdev_features_t features) 4348 { 4349 struct iavf_adapter *adapter = netdev_priv(netdev); 4350 4351 /* trigger update on any VLAN feature change */ 4352 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ 4353 (features & NETIF_VLAN_OFFLOAD_FEATURES)) 4354 iavf_set_vlan_offload_features(adapter, netdev->features, 4355 features); 4356 4357 return 0; 4358 } 4359 4360 /** 4361 * iavf_features_check - Validate encapsulated packet conforms to limits 4362 * @skb: skb buff 4363 * @dev: This physical port's netdev 4364 * @features: Offload features that the stack believes apply 4365 **/ 4366 static netdev_features_t iavf_features_check(struct sk_buff *skb, 4367 struct net_device *dev, 4368 netdev_features_t features) 4369 { 4370 size_t len; 4371 4372 /* No point in doing any of this if neither checksum nor GSO are 4373 * being requested for this frame. We can rule out both by just 4374 * checking for CHECKSUM_PARTIAL 4375 */ 4376 if (skb->ip_summed != CHECKSUM_PARTIAL) 4377 return features; 4378 4379 /* We cannot support GSO if the MSS is going to be less than 4380 * 64 bytes. If it is then we need to drop support for GSO. 
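	 * Clearing NETIF_F_GSO_MASK here makes the stack fall back to
	 * software segmentation for this skb.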
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

/**
 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated, determine the VLAN features that can be toggled on and off.
 **/
static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
{
	netdev_features_t hw_features = 0;

	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return hw_features;

	/* Enable VLAN features if supported */
	if (VLAN_ALLOWED(adapter)) {
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;

		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED &&
			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner &&
			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		}
	}

	return hw_features;
}

/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
4484 * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 4485 * were negotiated determine the VLAN features that are enabled by default. 4486 **/ 4487 static netdev_features_t 4488 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) 4489 { 4490 netdev_features_t features = 0; 4491 4492 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) 4493 return features; 4494 4495 if (VLAN_ALLOWED(adapter)) { 4496 features |= NETIF_F_HW_VLAN_CTAG_FILTER | 4497 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; 4498 } else if (VLAN_V2_ALLOWED(adapter)) { 4499 struct virtchnl_vlan_caps *vlan_v2_caps = 4500 &adapter->vlan_v2_caps; 4501 struct virtchnl_vlan_supported_caps *filtering_support = 4502 &vlan_v2_caps->filtering.filtering_support; 4503 struct virtchnl_vlan_supported_caps *stripping_support = 4504 &vlan_v2_caps->offloads.stripping_support; 4505 struct virtchnl_vlan_supported_caps *insertion_support = 4506 &vlan_v2_caps->offloads.insertion_support; 4507 u32 ethertype_init; 4508 4509 /* give priority to outer stripping and don't support both outer 4510 * and inner stripping 4511 */ 4512 ethertype_init = vlan_v2_caps->offloads.ethertype_init; 4513 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4514 if (stripping_support->outer & 4515 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4516 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4517 features |= NETIF_F_HW_VLAN_CTAG_RX; 4518 else if (stripping_support->outer & 4519 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4520 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4521 features |= NETIF_F_HW_VLAN_STAG_RX; 4522 } else if (stripping_support->inner != 4523 VIRTCHNL_VLAN_UNSUPPORTED) { 4524 if (stripping_support->inner & 4525 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4526 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4527 features |= NETIF_F_HW_VLAN_CTAG_RX; 4528 } 4529 4530 /* give priority to outer insertion and don't support both outer 4531 * and inner insertion 4532 */ 4533 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4534 if (insertion_support->outer & 4535 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4536 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4537 features |= NETIF_F_HW_VLAN_CTAG_TX; 4538 else if (insertion_support->outer & 4539 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4540 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4541 features |= NETIF_F_HW_VLAN_STAG_TX; 4542 } else if (insertion_support->inner != 4543 VIRTCHNL_VLAN_UNSUPPORTED) { 4544 if (insertion_support->inner & 4545 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4546 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4547 features |= NETIF_F_HW_VLAN_CTAG_TX; 4548 } 4549 4550 /* give priority to outer filtering and don't bother if both 4551 * outer and inner filtering are enabled 4552 */ 4553 ethertype_init = vlan_v2_caps->filtering.ethertype_init; 4554 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { 4555 if (filtering_support->outer & 4556 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4557 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4558 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4559 if (filtering_support->outer & 4560 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4561 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) 4562 features |= NETIF_F_HW_VLAN_STAG_FILTER; 4563 } else if (filtering_support->inner != 4564 VIRTCHNL_VLAN_UNSUPPORTED) { 4565 if (filtering_support->inner & 4566 VIRTCHNL_VLAN_ETHERTYPE_8100 && 4567 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) 4568 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4569 if (filtering_support->inner & 4570 VIRTCHNL_VLAN_ETHERTYPE_88A8 && 4571 ethertype_init & 
VIRTCHNL_VLAN_ETHERTYPE_88A8) 4572 features |= NETIF_F_HW_VLAN_STAG_FILTER; 4573 } 4574 } 4575 4576 return features; 4577 } 4578 4579 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ 4580 (!(((requested) & (feature_bit)) && \ 4581 !((allowed) & (feature_bit)))) 4582 4583 /** 4584 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support 4585 * @adapter: board private structure 4586 * @requested_features: stack requested NETDEV features 4587 **/ 4588 static netdev_features_t 4589 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, 4590 netdev_features_t requested_features) 4591 { 4592 netdev_features_t allowed_features; 4593 4594 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) | 4595 iavf_get_netdev_vlan_features(adapter); 4596 4597 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4598 allowed_features, 4599 NETIF_F_HW_VLAN_CTAG_TX)) 4600 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; 4601 4602 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4603 allowed_features, 4604 NETIF_F_HW_VLAN_CTAG_RX)) 4605 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; 4606 4607 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4608 allowed_features, 4609 NETIF_F_HW_VLAN_STAG_TX)) 4610 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; 4611 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4612 allowed_features, 4613 NETIF_F_HW_VLAN_STAG_RX)) 4614 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; 4615 4616 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4617 allowed_features, 4618 NETIF_F_HW_VLAN_CTAG_FILTER)) 4619 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4620 4621 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, 4622 allowed_features, 4623 NETIF_F_HW_VLAN_STAG_FILTER)) 4624 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; 4625 4626 if ((requested_features & 4627 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 4628 (requested_features & 4629 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && 4630 adapter->vlan_v2_caps.offloads.ethertype_match == 4631 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { 4632 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 4633 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | 4634 NETIF_F_HW_VLAN_STAG_TX); 4635 } 4636 4637 return requested_features; 4638 } 4639 4640 /** 4641 * iavf_fix_features - fix up the netdev feature bits 4642 * @netdev: our net device 4643 * @features: desired feature bits 4644 * 4645 * Returns fixed-up features bits 4646 **/ 4647 static netdev_features_t iavf_fix_features(struct net_device *netdev, 4648 netdev_features_t features) 4649 { 4650 struct iavf_adapter *adapter = netdev_priv(netdev); 4651 4652 return iavf_fix_netdev_vlan_features(adapter, features); 4653 } 4654 4655 static const struct net_device_ops iavf_netdev_ops = { 4656 .ndo_open = iavf_open, 4657 .ndo_stop = iavf_close, 4658 .ndo_start_xmit = iavf_xmit_frame, 4659 .ndo_set_rx_mode = iavf_set_rx_mode, 4660 .ndo_validate_addr = eth_validate_addr, 4661 .ndo_set_mac_address = iavf_set_mac, 4662 .ndo_change_mtu = iavf_change_mtu, 4663 .ndo_tx_timeout = iavf_tx_timeout, 4664 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid, 4665 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid, 4666 .ndo_features_check = iavf_features_check, 4667 .ndo_fix_features = iavf_fix_features, 4668 .ndo_set_features = iavf_set_features, 4669 
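	/* .ndo_setup_tc handles both TC_SETUP_QDISC_MQPRIO (ADq channels) and
	 * TC_SETUP_BLOCK (flower cloud filters), see iavf_setup_tc().
	 */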
.ndo_setup_tc = iavf_setup_tc, 4670 }; 4671 4672 /** 4673 * iavf_check_reset_complete - check that VF reset is complete 4674 * @hw: pointer to hw struct 4675 * 4676 * Returns 0 if device is ready to use, or -EBUSY if it's in reset. 4677 **/ 4678 static int iavf_check_reset_complete(struct iavf_hw *hw) 4679 { 4680 u32 rstat; 4681 int i; 4682 4683 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { 4684 rstat = rd32(hw, IAVF_VFGEN_RSTAT) & 4685 IAVF_VFGEN_RSTAT_VFR_STATE_MASK; 4686 if ((rstat == VIRTCHNL_VFR_VFACTIVE) || 4687 (rstat == VIRTCHNL_VFR_COMPLETED)) 4688 return 0; 4689 usleep_range(10, 20); 4690 } 4691 return -EBUSY; 4692 } 4693 4694 /** 4695 * iavf_process_config - Process the config information we got from the PF 4696 * @adapter: board private structure 4697 * 4698 * Verify that we have a valid config struct, and set up our netdev features 4699 * and our VSI struct. 4700 **/ 4701 int iavf_process_config(struct iavf_adapter *adapter) 4702 { 4703 struct virtchnl_vf_resource *vfres = adapter->vf_res; 4704 netdev_features_t hw_vlan_features, vlan_features; 4705 struct net_device *netdev = adapter->netdev; 4706 netdev_features_t hw_enc_features; 4707 netdev_features_t hw_features; 4708 4709 hw_enc_features = NETIF_F_SG | 4710 NETIF_F_IP_CSUM | 4711 NETIF_F_IPV6_CSUM | 4712 NETIF_F_HIGHDMA | 4713 NETIF_F_SOFT_FEATURES | 4714 NETIF_F_TSO | 4715 NETIF_F_TSO_ECN | 4716 NETIF_F_TSO6 | 4717 NETIF_F_SCTP_CRC | 4718 NETIF_F_RXHASH | 4719 NETIF_F_RXCSUM | 4720 0; 4721 4722 /* advertise to stack only if offloads for encapsulated packets is 4723 * supported 4724 */ 4725 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { 4726 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | 4727 NETIF_F_GSO_GRE | 4728 NETIF_F_GSO_GRE_CSUM | 4729 NETIF_F_GSO_IPXIP4 | 4730 NETIF_F_GSO_IPXIP6 | 4731 NETIF_F_GSO_UDP_TUNNEL_CSUM | 4732 NETIF_F_GSO_PARTIAL | 4733 0; 4734 4735 if (!(vfres->vf_cap_flags & 4736 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) 4737 netdev->gso_partial_features |= 4738 NETIF_F_GSO_UDP_TUNNEL_CSUM; 4739 4740 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; 4741 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 4742 netdev->hw_enc_features |= hw_enc_features; 4743 } 4744 /* record features VLANs can make use of */ 4745 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; 4746 4747 /* Write features and hw_features separately to avoid polluting 4748 * with, or dropping, features that are set when we registered. 4749 */ 4750 hw_features = hw_enc_features; 4751 4752 /* get HW VLAN features that can be toggled */ 4753 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); 4754 4755 /* Enable cloud filter if ADQ is supported */ 4756 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 4757 hw_features |= NETIF_F_HW_TC; 4758 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) 4759 hw_features |= NETIF_F_GSO_UDP_L4; 4760 4761 netdev->hw_features |= hw_features | hw_vlan_features; 4762 vlan_features = iavf_get_netdev_vlan_features(adapter); 4763 4764 netdev->features |= hw_features | vlan_features; 4765 4766 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 4767 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4768 4769 netdev->priv_flags |= IFF_UNICAST_FLT; 4770 4771 /* Do not turn on offloads when they are requested to be turned off. 4772 * TSO needs minimum 576 bytes to work correctly. 
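	 * GRO, GSO and TSO_ECN are likewise left disabled when the stack did
	 * not request them.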
4773 */ 4774 if (netdev->wanted_features) { 4775 if (!(netdev->wanted_features & NETIF_F_TSO) || 4776 netdev->mtu < 576) 4777 netdev->features &= ~NETIF_F_TSO; 4778 if (!(netdev->wanted_features & NETIF_F_TSO6) || 4779 netdev->mtu < 576) 4780 netdev->features &= ~NETIF_F_TSO6; 4781 if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) 4782 netdev->features &= ~NETIF_F_TSO_ECN; 4783 if (!(netdev->wanted_features & NETIF_F_GRO)) 4784 netdev->features &= ~NETIF_F_GRO; 4785 if (!(netdev->wanted_features & NETIF_F_GSO)) 4786 netdev->features &= ~NETIF_F_GSO; 4787 } 4788 4789 return 0; 4790 } 4791 4792 /** 4793 * iavf_shutdown - Shutdown the device in preparation for a reboot 4794 * @pdev: pci device structure 4795 **/ 4796 static void iavf_shutdown(struct pci_dev *pdev) 4797 { 4798 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); 4799 struct net_device *netdev = adapter->netdev; 4800 4801 netif_device_detach(netdev); 4802 4803 if (netif_running(netdev)) 4804 iavf_close(netdev); 4805 4806 if (iavf_lock_timeout(&adapter->crit_lock, 5000)) 4807 dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__); 4808 /* Prevent the watchdog from running. */ 4809 iavf_change_state(adapter, __IAVF_REMOVE); 4810 adapter->aq_required = 0; 4811 mutex_unlock(&adapter->crit_lock); 4812 4813 #ifdef CONFIG_PM 4814 pci_save_state(pdev); 4815 4816 #endif 4817 pci_disable_device(pdev); 4818 } 4819 4820 /** 4821 * iavf_probe - Device Initialization Routine 4822 * @pdev: PCI device information struct 4823 * @ent: entry in iavf_pci_tbl 4824 * 4825 * Returns 0 on success, negative on failure 4826 * 4827 * iavf_probe initializes an adapter identified by a pci_dev structure. 4828 * The OS initialization, configuring of the adapter private structure, 4829 * and a hardware reset occur. 4830 **/ 4831 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4832 { 4833 struct net_device *netdev; 4834 struct iavf_adapter *adapter = NULL; 4835 struct iavf_hw *hw = NULL; 4836 int err; 4837 4838 err = pci_enable_device(pdev); 4839 if (err) 4840 return err; 4841 4842 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4843 if (err) { 4844 dev_err(&pdev->dev, 4845 "DMA configuration failed: 0x%x\n", err); 4846 goto err_dma; 4847 } 4848 4849 err = pci_request_regions(pdev, iavf_driver_name); 4850 if (err) { 4851 dev_err(&pdev->dev, 4852 "pci_request_regions failed 0x%x\n", err); 4853 goto err_pci_reg; 4854 } 4855 4856 pci_set_master(pdev); 4857 4858 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), 4859 IAVF_MAX_REQ_QUEUES); 4860 if (!netdev) { 4861 err = -ENOMEM; 4862 goto err_alloc_etherdev; 4863 } 4864 4865 SET_NETDEV_DEV(netdev, &pdev->dev); 4866 4867 pci_set_drvdata(pdev, netdev); 4868 adapter = netdev_priv(netdev); 4869 4870 adapter->netdev = netdev; 4871 adapter->pdev = pdev; 4872 4873 hw = &adapter->hw; 4874 hw->back = adapter; 4875 4876 adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, 4877 iavf_driver_name); 4878 if (!adapter->wq) { 4879 err = -ENOMEM; 4880 goto err_alloc_wq; 4881 } 4882 4883 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 4884 iavf_change_state(adapter, __IAVF_STARTUP); 4885 4886 /* Call save state here because it relies on the adapter struct. 
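	 * (iavf_shutdown() saves PCI state again, under CONFIG_PM, just
	 * before disabling the device.)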
*/ 4887 pci_save_state(pdev); 4888 4889 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 4890 pci_resource_len(pdev, 0)); 4891 if (!hw->hw_addr) { 4892 err = -EIO; 4893 goto err_ioremap; 4894 } 4895 hw->vendor_id = pdev->vendor; 4896 hw->device_id = pdev->device; 4897 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4898 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4899 hw->subsystem_device_id = pdev->subsystem_device; 4900 hw->bus.device = PCI_SLOT(pdev->devfn); 4901 hw->bus.func = PCI_FUNC(pdev->devfn); 4902 hw->bus.bus_id = pdev->bus->number; 4903 4904 /* set up the locks for the AQ, do this only once in probe 4905 * and destroy them only once in remove 4906 */ 4907 mutex_init(&adapter->crit_lock); 4908 mutex_init(&adapter->client_lock); 4909 mutex_init(&hw->aq.asq_mutex); 4910 mutex_init(&hw->aq.arq_mutex); 4911 4912 spin_lock_init(&adapter->mac_vlan_list_lock); 4913 spin_lock_init(&adapter->cloud_filter_list_lock); 4914 spin_lock_init(&adapter->fdir_fltr_lock); 4915 spin_lock_init(&adapter->adv_rss_lock); 4916 4917 INIT_LIST_HEAD(&adapter->mac_filter_list); 4918 INIT_LIST_HEAD(&adapter->vlan_filter_list); 4919 INIT_LIST_HEAD(&adapter->cloud_filter_list); 4920 INIT_LIST_HEAD(&adapter->fdir_list_head); 4921 INIT_LIST_HEAD(&adapter->adv_rss_list_head); 4922 4923 INIT_WORK(&adapter->reset_task, iavf_reset_task); 4924 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 4925 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 4926 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 4927 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 4928 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 4929 4930 /* Setup the wait queue for indicating transition to down status */ 4931 init_waitqueue_head(&adapter->down_waitqueue); 4932 4933 /* Setup the wait queue for indicating virtchannel events */ 4934 init_waitqueue_head(&adapter->vc_waitqueue); 4935 4936 return 0; 4937 4938 err_ioremap: 4939 destroy_workqueue(adapter->wq); 4940 err_alloc_wq: 4941 free_netdev(netdev); 4942 err_alloc_etherdev: 4943 pci_release_regions(pdev); 4944 err_pci_reg: 4945 err_dma: 4946 pci_disable_device(pdev); 4947 return err; 4948 } 4949 4950 /** 4951 * iavf_suspend - Power management suspend routine 4952 * @dev_d: device info pointer 4953 * 4954 * Called when the system (VM) is entering sleep/suspend. 4955 **/ 4956 static int __maybe_unused iavf_suspend(struct device *dev_d) 4957 { 4958 struct net_device *netdev = dev_get_drvdata(dev_d); 4959 struct iavf_adapter *adapter = netdev_priv(netdev); 4960 4961 netif_device_detach(netdev); 4962 4963 while (!mutex_trylock(&adapter->crit_lock)) 4964 usleep_range(500, 1000); 4965 4966 if (netif_running(netdev)) { 4967 rtnl_lock(); 4968 iavf_down(adapter); 4969 rtnl_unlock(); 4970 } 4971 iavf_free_misc_irq(adapter); 4972 iavf_reset_interrupt_capability(adapter); 4973 4974 mutex_unlock(&adapter->crit_lock); 4975 4976 return 0; 4977 } 4978 4979 /** 4980 * iavf_resume - Power management resume routine 4981 * @dev_d: device info pointer 4982 * 4983 * Called when the system (VM) is resumed from sleep/suspend. 
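 * Restores interrupt capability, requests the misc IRQ back and schedules a
 * reset task so the data path is rebuilt before the netdev is re-attached.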
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter;
	u32 err;

	adapter = iavf_pdev_to_adapter(pdev);

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(adapter->wq, &adapter->reset_task);

	netif_device_attach(adapter->netdev);

	return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct net_device *netdev;
	struct iavf_hw *hw;
	int err;

	netdev = adapter->netdev;
	hw = &adapter->hw;

	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
	while (1) {
		mutex_lock(&adapter->crit_lock);
		if (adapter->state == __IAVF_RUNNING ||
		    adapter->state == __IAVF_DOWN ||
		    adapter->state == __IAVF_INIT_FAILED) {
			mutex_unlock(&adapter->crit_lock);
			break;
		}
		/* Simply return if we already went through iavf_shutdown */
		if (adapter->state == __IAVF_REMOVE) {
			mutex_unlock(&adapter->crit_lock);
			return;
		}

		mutex_unlock(&adapter->crit_lock);
		usleep_range(500, 1000);
	}
	cancel_delayed_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		rtnl_lock();
		unregister_netdevice(netdev);
		adapter->netdev_registered = false;
		rtnl_unlock();
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	mutex_lock(&adapter->crit_lock);
	dev_info(&adapter->pdev->dev, "Removing device\n");
	iavf_change_state(adapter, __IAVF_REMOVE);

	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once.
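	 * iavf_asq_done() reports whether the PF has consumed our previous
	 * admin queue request.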
*/ 5085 if (!iavf_asq_done(hw)) { 5086 iavf_request_reset(adapter); 5087 msleep(50); 5088 } 5089 5090 iavf_misc_irq_disable(adapter); 5091 /* Shut down all the garbage mashers on the detention level */ 5092 cancel_work_sync(&adapter->reset_task); 5093 cancel_delayed_work_sync(&adapter->watchdog_task); 5094 cancel_work_sync(&adapter->adminq_task); 5095 cancel_delayed_work_sync(&adapter->client_task); 5096 5097 adapter->aq_required = 0; 5098 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 5099 5100 iavf_free_all_tx_resources(adapter); 5101 iavf_free_all_rx_resources(adapter); 5102 iavf_free_misc_irq(adapter); 5103 5104 iavf_reset_interrupt_capability(adapter); 5105 iavf_free_q_vectors(adapter); 5106 5107 iavf_free_rss(adapter); 5108 5109 if (hw->aq.asq.count) 5110 iavf_shutdown_adminq(hw); 5111 5112 /* destroy the locks only once, here */ 5113 mutex_destroy(&hw->aq.arq_mutex); 5114 mutex_destroy(&hw->aq.asq_mutex); 5115 mutex_destroy(&adapter->client_lock); 5116 mutex_unlock(&adapter->crit_lock); 5117 mutex_destroy(&adapter->crit_lock); 5118 5119 iounmap(hw->hw_addr); 5120 pci_release_regions(pdev); 5121 iavf_free_queues(adapter); 5122 kfree(adapter->vf_res); 5123 spin_lock_bh(&adapter->mac_vlan_list_lock); 5124 /* If we got removed before an up/down sequence, we've got a filter 5125 * hanging out there that we need to get rid of. 5126 */ 5127 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 5128 list_del(&f->list); 5129 kfree(f); 5130 } 5131 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, 5132 list) { 5133 list_del(&vlf->list); 5134 kfree(vlf); 5135 } 5136 5137 spin_unlock_bh(&adapter->mac_vlan_list_lock); 5138 5139 spin_lock_bh(&adapter->cloud_filter_list_lock); 5140 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 5141 list_del(&cf->list); 5142 kfree(cf); 5143 } 5144 spin_unlock_bh(&adapter->cloud_filter_list_lock); 5145 5146 spin_lock_bh(&adapter->fdir_fltr_lock); 5147 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { 5148 list_del(&fdir->list); 5149 kfree(fdir); 5150 } 5151 spin_unlock_bh(&adapter->fdir_fltr_lock); 5152 5153 spin_lock_bh(&adapter->adv_rss_lock); 5154 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, 5155 list) { 5156 list_del(&rss->list); 5157 kfree(rss); 5158 } 5159 spin_unlock_bh(&adapter->adv_rss_lock); 5160 5161 destroy_workqueue(adapter->wq); 5162 5163 free_netdev(netdev); 5164 5165 pci_disable_device(pdev); 5166 } 5167 5168 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); 5169 5170 static struct pci_driver iavf_driver = { 5171 .name = iavf_driver_name, 5172 .id_table = iavf_pci_tbl, 5173 .probe = iavf_probe, 5174 .remove = iavf_remove, 5175 .driver.pm = &iavf_pm_ops, 5176 .shutdown = iavf_shutdown, 5177 }; 5178 5179 /** 5180 * iavf_init_module - Driver Registration Routine 5181 * 5182 * iavf_init_module is the first routine called when the driver is 5183 * loaded. All it does is register with the PCI subsystem. 5184 **/ 5185 static int __init iavf_init_module(void) 5186 { 5187 pr_info("iavf: %s\n", iavf_driver_string); 5188 5189 pr_info("%s\n", iavf_copyright); 5190 5191 return pci_register_driver(&iavf_driver); 5192 } 5193 5194 module_init(iavf_init_module); 5195 5196 /** 5197 * iavf_exit_module - Driver Exit Cleanup Routine 5198 * 5199 * iavf_exit_module is called just before the driver is removed 5200 * from memory. 
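 * All it does is unregister with the PCI subsystem.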
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
}

module_exit(iavf_exit_module);

/* iavf_main.c */